Port a bunch of network drivers for low-quality NICs (which will incur extra copying overheads within Xen), but this will at least allow us to work on a wider range of systems.
3ddb79bdPyAvT_WZTAFhaX0jp-yXSw xen/drivers/ide/ide_modes.h
3e4a8d401aSwOzCScQXR3lsmNlAwUQ xen/drivers/ide/piix.c
3ddb79bfMlOcWUwjtg6oMYhGySHDDw xen/drivers/net/3c59x.c
+3f0c423bjmEpn1Nbk1Q8fv8ElccwAA xen/drivers/net/8139too.c
3ddb79c0tWiE8xIFHszxipeVCGKTSA xen/drivers/net/Makefile
+3f0c4247730LYUgz3p5ziYqy-s_glw xen/drivers/net/SUPPORTED_CARDS
3ddb79bfU-H1Hms4BuJEPPydjXUEaQ xen/drivers/net/Space.c
+3f0c428e41JP96bh-J0jnX59vJyUeQ xen/drivers/net/e100/LICENSE
+3f0c428es_xrZnnZQXXHhjzuqj9CTg xen/drivers/net/e100/Makefile
+3f0c428eCEnifr-r6XCZKUkzIEHdYw xen/drivers/net/e100/e100.h
+3f0c428eyYDJIqvO7W_QbgnGnbgPqA xen/drivers/net/e100/e100_config.c
+3f0c428e-NvNXlOyIw8H3YllFL6lfA xen/drivers/net/e100/e100_config.h
+3f0c428ecPXSlA_4rp_-6O09uQqHbg xen/drivers/net/e100/e100_eeprom.c
+3f0c428eWlXUzaulDjS1UpUP7Ckejg xen/drivers/net/e100/e100_main.c
+3f0c428eKW1sEaCen-HIMUwMtQXnVQ xen/drivers/net/e100/e100_phy.c
+3f0c428eR00XnyLIZKb5H5QF_I9QDg xen/drivers/net/e100/e100_phy.h
+3f0c428eII-G2i_Qv_aCQjLzHcDKEQ xen/drivers/net/e100/e100_test.c
+3f0c428eJ_gqbOFzS3bWSI7geA--Eg xen/drivers/net/e100/e100_ucode.h
3e4540ccS4bfbx9rLiLElP0F1OVwZA xen/drivers/net/e1000/LICENSE
3e4540ccXG6af_6-u0IiKKvtdGHJyA xen/drivers/net/e1000/Makefile
3e4540ccoY2eo4VIkbR4sCOj0bVzSA xen/drivers/net/e1000/e1000.h
3e4540ccvQ9Dtoh9tV-L3ULUwN9X7g xen/drivers/net/e1000/e1000_main.c
3e4540cc3t7_y-YLeyMG2pX9xtdXPA xen/drivers/net/e1000/e1000_osdep.h
3e4540cct_8Ig-Y1W_vM2gS_u7mC0A xen/drivers/net/e1000/e1000_param.c
-3e465c00t2nochqR27eEY_FBjxsUCw xen/drivers/net/ne/8390.c
-3e465c00AIRmk20x1vYETtnL71eGvA xen/drivers/net/ne/8390.h
-3e465c00UIvPTAtAcgcQWCVFa2bwww xen/drivers/net/ne/Makefile
-3e465c00rWSHiXmHuOWLRf7r2n8S3g xen/drivers/net/ne/ne.c
3ddb79bfKvn9mt0kofpkw0QaWjxO6A xen/drivers/net/net_init.c
+3f0c428exbF4as5zi8GyGyDSUITmxg xen/drivers/net/pcnet32.c
3ddb79bf_CBcu3QWYwq4bNAOnM2RqQ xen/drivers/net/setup.c
3e45a0c6u66EL2AI36eLOmf_abXs7g xen/drivers/net/tg3.c
3e45a0c6yrXj5pmQT0PvVSJ01YLABQ xen/drivers/net/tg3.h
+3f0c428e0tn3ZknveWidRNtHYcyHzw xen/drivers/net/tulip/21142.c
+3f0c428esCXQWoJHSJ5zUZ7iQc_rlQ xen/drivers/net/tulip/ChangeLog
+3f0c428eB8pVjwDolFZDa0YZMQW1rw xen/drivers/net/tulip/Makefile
+3f0c428el4rtJgjSoK_K04g5Udg2Tw xen/drivers/net/tulip/eeprom.c
+3f0c428eYyDHq5AnrB60p3g0uXWYwA xen/drivers/net/tulip/interrupt.c
+3f0c428eI99GG-HH2aQCIQ4tyISe8w xen/drivers/net/tulip/media.c
+3f0c428eErjUSQiTzPJJlSrR0ORJog xen/drivers/net/tulip/pnic.c
+3f0c428eyyo0-aI_fj4gDBiNYe-Idw xen/drivers/net/tulip/pnic2.c
+3f0c428eZQhoTueBPxOO5YbuzoTofw xen/drivers/net/tulip/timer.c
+3f0c428eoDFGJZJrBRnA7918Yovt0A xen/drivers/net/tulip/tulip.h
+3f0c428e46klSV-upTm6D8tCdzQs8Q xen/drivers/net/tulip/tulip_core.c
+3f0c428eu_CTRPhmTgGIjoiGI_PmrA xen/drivers/net/via-rhine.c
3ddb79beUWngyIhMHgyPtuTem4o4JA xen/drivers/pci/Makefile
3ddb79beU9td0Mnm0VUMklerBa37qQ xen/drivers/pci/compat.c
3ddb79beHkGQE58z5t5gyUCYiwOxvw xen/drivers/pci/gen-devlist.c
3e9c248aEG_nCngztiFmv5CfayNkcA xen/include/xeno/cdrom.h
3ddb79c259jh8hE7vre_8NuE7nwNSA xen/include/xeno/config.h
3eb165e0eawr3R-p2ZQtSdLWtLRN_A xen/include/xeno/console.h
+3f0c428eIwGr7n9fj4FkBdX2YvA_Rw xen/include/xeno/crc32.h
3ddb79c1V44RD26YqCUm-kqIupM37A xen/include/xeno/ctype.h
3ddb79c05DdHQ0UxX_jKsXdR4QlMCA xen/include/xeno/delay.h
3e6377eaioRoNm0m_HSDEAd4Vqrq_w xen/include/xeno/dom_mem_ops.h
--- /dev/null
+/*
+
+ 8139too.c: A RealTek RTL-8139 Fast Ethernet driver for Linux.
+
+ Maintained by Jeff Garzik <jgarzik@pobox.com>
+ Copyright 2000-2002 Jeff Garzik
+
+ Much code comes from Donald Becker's rtl8139.c driver,
+ versions 1.13 and older. This driver was originally based
+ on rtl8139.c version 1.07. Header of rtl8139.c version 1.13:
+
+ -----<snip>-----
+
+ Written 1997-2001 by Donald Becker.
+ This software may be used and distributed according to the
+ terms of the GNU General Public License (GPL), incorporated
+ herein by reference. Drivers based on or derived from this
+ code fall under the GPL and must retain the authorship,
+ copyright and license notice. This file is not a complete
+ program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ This driver is for boards based on the RTL8129 and RTL8139
+ PCI ethernet chips.
+
+ The author may be reached as becker@scyld.com, or C/O Scyld
+ Computing Corporation 410 Severn Ave., Suite 210 Annapolis
+ MD 21403
+
+ Support and updates available at
+ http://www.scyld.com/network/rtl8139.html
+
+ Twister-tuning table provided by Kinston
+ <shangh@realtek.com.tw>.
+
+ -----<snip>-----
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Contributors:
+
+ Donald Becker - he wrote the original driver, kudos to him!
+ (but please don't e-mail him for support, this isn't his driver)
+
+ Tigran Aivazian - bug fixes, skbuff free cleanup
+
+ Martin Mares - suggestions for PCI cleanup
+
+ David S. Miller - PCI DMA and softnet updates
+
+ Ernst Gill - fixes ported from BSD driver
+
+ Daniel Kobras - identified specific locations of
+ posted MMIO write bugginess
+
+ Gerard Sharp - bug fix, testing and feedback
+
+ David Ford - Rx ring wrap fix
+
+ Dan DeMaggio - swapped RTL8139 cards with me, and allowed me
+ to find and fix a crucial bug on older chipsets.
+
+ Donald Becker/Chris Butterworth/Marcus Westergren -
+ Noticed various Rx packet size-related buglets.
+
+ Santiago Garcia Mantinan - testing and feedback
+
+ Jens David - 2.2.x kernel backports
+
+ Martin Dennett - incredibly helpful insight on undocumented
+ features of the 8139 chips
+
+ Jean-Jacques Michel - bug fix
+
+ Tobias Ringström - Rx interrupt status checking suggestion
+
+ Andrew Morton - Clear blocked signals, avoid
+ buffer overrun setting current->comm.
+
+ Kalle Olavi Niemitalo - Wake-on-LAN ioctls
+
+ Robert Kuebel - Save kernel thread from dying on any signal.
+
+ Submitting bug reports:
+
+ "rtl8139-diag -mmmaaavvveefN" output
+ enable RTL8139_DEBUG below, and look at 'dmesg' or kernel log
+
+ See 8139too.txt for more details.
+
+*/
+
+#define DRV_NAME "8139too"
+#define DRV_VERSION "0.9.26"
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/lib.h>
+#include <linux/timer.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+/* This must be global for CONFIG_8139TOO_TUNE_TWISTER case */
+static int next_tick = 3 * HZ;
+
+#define RTL8139_DRIVER_NAME DRV_NAME " Fast Ethernet driver " DRV_VERSION
+#define PFX DRV_NAME ": "
+
+
+/* enable PIO instead of MMIO, if CONFIG_8139TOO_PIO is selected */
+#ifdef CONFIG_8139TOO_PIO
+#define USE_IO_OPS 1
+#endif
+
+/* define to 1 to enable copious debugging info */
+#undef RTL8139_DEBUG
+
+/* define to 1 to disable lightweight runtime debugging checks */
+#undef RTL8139_NDEBUG
+
+
+#ifdef RTL8139_DEBUG
+/* note: prints function name for you */
+# define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __FUNCTION__ , ## args)
+#else
+# define DPRINTK(fmt, args...)
+#endif
+
+#ifdef RTL8139_NDEBUG
+# define assert(expr) do {} while (0)
+#else
+# define assert(expr) \
+ if(!(expr)) { \
+ printk( "Assertion failed! %s,%s,%s,line=%d\n", \
+ #expr,__FILE__,__FUNCTION__,__LINE__); \
+ }
+#endif
+
+
+/* A few user-configurable values. */
+/* media options */
+#define MAX_UNITS 8
+static int media[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
+ The RTL chips use a 64 element hash table based on the Ethernet CRC. */
+static int multicast_filter_limit = 32;
+
+#if 0
+/* bitmapped message enable number */
+static int debug = -1;
+#endif
+
+/* Size of the in-memory receive ring. */
+#define RX_BUF_LEN_IDX 2 /* 0==8K, 1==16K, 2==32K, 3==64K */
+#define RX_BUF_LEN (8192 << RX_BUF_LEN_IDX)
+#define RX_BUF_PAD 16
+#define RX_BUF_WRAP_PAD 2048 /* spare padding to handle lack of packet wrap */
+#define RX_BUF_TOT_LEN (RX_BUF_LEN + RX_BUF_PAD + RX_BUF_WRAP_PAD)
+
+/* Number of Tx descriptor registers. */
+#define NUM_TX_DESC 4
+
+/* max supported ethernet frame size -- must be at least (dev->mtu+14+4).*/
+#define MAX_ETH_FRAME_SIZE 1536
+
+/* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). */
+#define TX_BUF_SIZE MAX_ETH_FRAME_SIZE
+#define TX_BUF_TOT_LEN (TX_BUF_SIZE * NUM_TX_DESC)
+
+/* PCI Tuning Parameters
+ Threshold is bytes transferred to chip before transmission starts. */
+#define TX_FIFO_THRESH 256 /* In bytes, rounded down to 32 byte units. */
+
+/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */
+#define RX_FIFO_THRESH 7 /* Rx buffer level before first PCI xfer. */
+#define RX_DMA_BURST 7 /* Maximum PCI burst, '6' is 1024 */
+#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
+#define TX_RETRY 8 /* 0-15. retries = 16 + (TX_RETRY * 16) */
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (6*HZ)
+
+
+enum {
+ HAS_MII_XCVR = 0x010000,
+ HAS_CHIP_XCVR = 0x020000,
+ HAS_LNK_CHNG = 0x040000,
+};
+
+#define RTL_NUM_STATS 4 /* number of ETHTOOL_GSTATS u64's */
+#define RTL_REGS_VER 1 /* version of reg. data in ETHTOOL_GREGS */
+#define RTL_MIN_IO_SIZE 0x80
+#define RTL8139B_IO_SIZE 256
+
+#define RTL8129_CAPS HAS_MII_XCVR
+#define RTL8139_CAPS HAS_CHIP_XCVR|HAS_LNK_CHNG
+
+typedef enum {
+ RTL8139 = 0,
+ RTL8139_CB,
+ SMC1211TX,
+ /*MPX5030,*/
+ DELTA8139,
+ ADDTRON8139,
+ DFE538TX,
+ DFE690TXD,
+ FE2000VX,
+ ALLIED8139,
+ RTL8129,
+ FNW3603TX,
+ FNW3800TX,
+} board_t;
+
+
+/* indexed by board_t, above */
+static struct {
+ const char *name;
+ u32 hw_flags;
+} board_info[] __devinitdata = {
+ { "RealTek RTL8139 Fast Ethernet", RTL8139_CAPS },
+ { "RealTek RTL8139B PCI/CardBus", RTL8139_CAPS },
+ { "SMC1211TX EZCard 10/100 (RealTek RTL8139)", RTL8139_CAPS },
+/* { MPX5030, "Accton MPX5030 (RealTek RTL8139)", RTL8139_CAPS },*/
+ { "Delta Electronics 8139 10/100BaseTX", RTL8139_CAPS },
+ { "Addtron Technolgy 8139 10/100BaseTX", RTL8139_CAPS },
+ { "D-Link DFE-538TX (RealTek RTL8139)", RTL8139_CAPS },
+ { "D-Link DFE-690TXD (RealTek RTL8139)", RTL8139_CAPS },
+ { "AboCom FE2000VX (RealTek RTL8139)", RTL8139_CAPS },
+ { "Allied Telesyn 8139 CardBus", RTL8139_CAPS },
+ { "RealTek RTL8129", RTL8129_CAPS },
+ { "Planex FNW-3603-TX 10/100 CardBus", RTL8139_CAPS },
+ { "Planex FNW-3800-TX 10/100 CardBus", RTL8139_CAPS },
+};
+
+
+static struct pci_device_id rtl8139_pci_tbl[] __devinitdata = {
+ {0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139_CB },
+ {0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMC1211TX },
+/* {0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MPX5030 },*/
+ {0x1500, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DELTA8139 },
+ {0x4033, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ADDTRON8139 },
+ {0x1186, 0x1300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DFE538TX },
+ {0x1186, 0x1340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DFE690TXD },
+ {0x13d1, 0xab06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, FE2000VX },
+ {0x1259, 0xa117, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ALLIED8139 },
+ {0x14ea, 0xab06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, FNW3603TX },
+ {0x14ea, 0xab07, PCI_ANY_ID, PCI_ANY_ID, 0, 0, FNW3800TX },
+
+#ifdef CONFIG_8139TOO_8129
+ {0x10ec, 0x8129, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8129 },
+#endif
+
+ /* some crazy cards report invalid vendor ids like
+ * 0x0001 here. The other ids are valid and constant,
+ * so we simply don't match on the main vendor id.
+ */
+ {PCI_ANY_ID, 0x8139, 0x10ec, 0x8139, 0, 0, RTL8139 },
+ {PCI_ANY_ID, 0x8139, 0x1186, 0x1300, 0, 0, DFE538TX },
+ {PCI_ANY_ID, 0x8139, 0x13d1, 0xab06, 0, 0, FE2000VX },
+
+ {0,}
+};
+MODULE_DEVICE_TABLE (pci, rtl8139_pci_tbl);
+
+#if 0
+static struct {
+ const char str[ETH_GSTRING_LEN];
+} ethtool_stats_keys[] = {
+ { "early_rx" },
+ { "tx_buf_mapped" },
+ { "tx_timeouts" },
+ { "rx_lost_in_ring" },
+};
+#endif
+
+/* The rest of these values should never change. */
+
+/* Symbolic offsets to registers. */
+enum RTL8139_registers {
+ MAC0 = 0, /* Ethernet hardware address. */
+ MAR0 = 8, /* Multicast filter. */
+ TxStatus0 = 0x10, /* Transmit status (Four 32bit registers). */
+ TxAddr0 = 0x20, /* Tx descriptors (also four 32bit). */
+ RxBuf = 0x30,
+ ChipCmd = 0x37,
+ RxBufPtr = 0x38,
+ RxBufAddr = 0x3A,
+ IntrMask = 0x3C,
+ IntrStatus = 0x3E,
+ TxConfig = 0x40,
+ ChipVersion = 0x43,
+ RxConfig = 0x44,
+ Timer = 0x48, /* A general-purpose counter. */
+ RxMissed = 0x4C, /* 24 bits valid, write clears. */
+ Cfg9346 = 0x50,
+ Config0 = 0x51,
+ Config1 = 0x52,
+ FlashReg = 0x54,
+ MediaStatus = 0x58,
+ Config3 = 0x59,
+ Config4 = 0x5A, /* absent on RTL-8139A */
+ HltClk = 0x5B,
+ MultiIntr = 0x5C,
+ TxSummary = 0x60,
+ BasicModeCtrl = 0x62,
+ BasicModeStatus = 0x64,
+ NWayAdvert = 0x66,
+ NWayLPAR = 0x68,
+ NWayExpansion = 0x6A,
+ /* Undocumented registers, but required for proper operation. */
+ FIFOTMS = 0x70, /* FIFO Control and test. */
+ CSCR = 0x74, /* Chip Status and Configuration Register. */
+ PARA78 = 0x78,
+ PARA7c = 0x7c, /* Magic transceiver parameter register. */
+ Config5 = 0xD8, /* absent on RTL-8139A */
+};
+
+enum ClearBitMasks {
+ MultiIntrClear = 0xF000,
+ ChipCmdClear = 0xE2,
+ Config1Clear = (1<<7)|(1<<6)|(1<<3)|(1<<2)|(1<<1),
+};
+
+enum ChipCmdBits {
+ CmdReset = 0x10,
+ CmdRxEnb = 0x08,
+ CmdTxEnb = 0x04,
+ RxBufEmpty = 0x01,
+};
+
+/* Interrupt register bits, using my own meaningful names. */
+enum IntrStatusBits {
+ PCIErr = 0x8000,
+ PCSTimeout = 0x4000,
+ RxFIFOOver = 0x40,
+ RxUnderrun = 0x20,
+ RxOverflow = 0x10,
+ TxErr = 0x08,
+ TxOK = 0x04,
+ RxErr = 0x02,
+ RxOK = 0x01,
+
+ RxAckBits = RxFIFOOver | RxOverflow | RxOK,
+};
+
+enum TxStatusBits {
+ TxHostOwns = 0x2000,
+ TxUnderrun = 0x4000,
+ TxStatOK = 0x8000,
+ TxOutOfWindow = 0x20000000,
+ TxAborted = 0x40000000,
+ TxCarrierLost = 0x80000000,
+};
+enum RxStatusBits {
+ RxMulticast = 0x8000,
+ RxPhysical = 0x4000,
+ RxBroadcast = 0x2000,
+ RxBadSymbol = 0x0020,
+ RxRunt = 0x0010,
+ RxTooLong = 0x0008,
+ RxCRCErr = 0x0004,
+ RxBadAlign = 0x0002,
+ RxStatusOK = 0x0001,
+};
+
+/* Bits in RxConfig. */
+enum rx_mode_bits {
+ AcceptErr = 0x20,
+ AcceptRunt = 0x10,
+ AcceptBroadcast = 0x08,
+ AcceptMulticast = 0x04,
+ AcceptMyPhys = 0x02,
+ AcceptAllPhys = 0x01,
+};
+
+/* Bits in TxConfig. */
+enum tx_config_bits {
+ TxIFG1 = (1 << 25), /* Interframe Gap Time */
+ TxIFG0 = (1 << 24), /* Enabling these bits violates IEEE 802.3 */
+ TxLoopBack = (1 << 18) | (1 << 17), /* enable loopback test mode */
+ TxCRC = (1 << 16), /* DISABLE appending CRC to end of Tx packets */
+ TxClearAbt = (1 << 0), /* Clear abort (WO) */
+ TxDMAShift = 8, /* DMA burst value (0-7) is shifted this many bits */
+ TxRetryShift = 4, /* TXRR value (0-15) is shifted this many bits */
+
+ TxVersionMask = 0x7C800000, /* mask out version bits 30-26, 23 */
+};
+
+/* Bits in Config1 */
+enum Config1Bits {
+ Cfg1_PM_Enable = 0x01,
+ Cfg1_VPD_Enable = 0x02,
+ Cfg1_PIO = 0x04,
+ Cfg1_MMIO = 0x08,
+ LWAKE = 0x10, /* not on 8139, 8139A */
+ Cfg1_Driver_Load = 0x20,
+ Cfg1_LED0 = 0x40,
+ Cfg1_LED1 = 0x80,
+ SLEEP = (1 << 1), /* only on 8139, 8139A */
+ PWRDN = (1 << 0), /* only on 8139, 8139A */
+};
+
+/* Bits in Config3 */
+enum Config3Bits {
+ Cfg3_FBtBEn = (1 << 0), /* 1 = Fast Back to Back */
+ Cfg3_FuncRegEn = (1 << 1), /* 1 = enable CardBus Function registers */
+ Cfg3_CLKRUN_En = (1 << 2), /* 1 = enable CLKRUN */
+ Cfg3_CardB_En = (1 << 3), /* 1 = enable CardBus registers */
+ Cfg3_LinkUp = (1 << 4), /* 1 = wake up on link up */
+ Cfg3_Magic = (1 << 5), /* 1 = wake up on Magic Packet (tm) */
+ Cfg3_PARM_En = (1 << 6), /* 0 = software can set twister parameters */
+ Cfg3_GNTSel = (1 << 7), /* 1 = delay 1 clock from PCI GNT signal */
+};
+
+/* Bits in Config4 */
+enum Config4Bits {
+ LWPTN = (1 << 2), /* not on 8139, 8139A */
+};
+
+/* Bits in Config5 */
+enum Config5Bits {
+ Cfg5_PME_STS = (1 << 0), /* 1 = PCI reset resets PME_Status */
+ Cfg5_LANWake = (1 << 1), /* 1 = enable LANWake signal */
+ Cfg5_LDPS = (1 << 2), /* 0 = save power when link is down */
+ Cfg5_FIFOAddrPtr = (1 << 3), /* Realtek internal SRAM testing */
+ Cfg5_UWF = (1 << 4), /* 1 = accept unicast wakeup frame */
+ Cfg5_MWF = (1 << 5), /* 1 = accept multicast wakeup frame */
+ Cfg5_BWF = (1 << 6), /* 1 = accept broadcast wakeup frame */
+};
+
+enum RxConfigBits {
+ /* rx fifo threshold */
+ RxCfgFIFOShift = 13,
+ RxCfgFIFONone = (7 << RxCfgFIFOShift),
+
+ /* Max DMA burst */
+ RxCfgDMAShift = 8,
+ RxCfgDMAUnlimited = (7 << RxCfgDMAShift),
+
+ /* rx ring buffer length */
+ RxCfgRcv8K = 0,
+ RxCfgRcv16K = (1 << 11),
+ RxCfgRcv32K = (1 << 12),
+ RxCfgRcv64K = (1 << 11) | (1 << 12),
+
+ /* Disable packet wrap at end of Rx buffer */
+ RxNoWrap = (1 << 7),
+};
+
+
+/* Twister tuning parameters from RealTek.
+ Completely undocumented, but required to tune bad links on some boards. */
+enum CSCRBits {
+ CSCR_LinkOKBit = 0x0400,
+ CSCR_LinkChangeBit = 0x0800,
+ CSCR_LinkStatusBits = 0x0f000,
+ CSCR_LinkDownOffCmd = 0x003c0,
+ CSCR_LinkDownCmd = 0x0f3c0,
+};
+
+
+enum Cfg9346Bits {
+ Cfg9346_Lock = 0x00,
+ Cfg9346_Unlock = 0xC0,
+};
+
+#ifdef CONFIG_8139TOO_TUNE_TWISTER
+
+enum TwisterParamVals {
+ PARA78_default = 0x78fa8388,
+ PARA7c_default = 0xcb38de43, /* param[0][3] */
+ PARA7c_xxx = 0xcb38de43,
+};
+
+static const unsigned long param[4][4] = {
+ {0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43},
+ {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
+ {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
+ {0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83}
+};
+
+#endif /* CONFIG_8139TOO_TUNE_TWISTER */
+
+typedef enum {
+ CH_8139 = 0,
+ CH_8139_K,
+ CH_8139A,
+ CH_8139B,
+ CH_8130,
+ CH_8139C,
+} chip_t;
+
+enum chip_flags {
+ HasHltClk = (1 << 0),
+ HasLWake = (1 << 1),
+};
+
+
+/* directly indexed by chip_t, above */
+const static struct {
+ const char *name;
+ u8 version; /* from RTL8139C docs */
+ u32 RxConfigMask; /* should clear the bits supported by this chip */
+ u32 flags;
+} rtl_chip_info[] = {
+ { "RTL-8139",
+ 0x40,
+ 0xf0fe0040, /* XXX copied from RTL8139A, verify */
+ HasHltClk,
+ },
+
+ { "RTL-8139 rev K",
+ 0x60,
+ 0xf0fe0040,
+ HasHltClk,
+ },
+
+ { "RTL-8139A",
+ 0x70,
+ 0xf0fe0040,
+ HasHltClk, /* XXX undocumented? */
+ },
+
+ { "RTL-8139B",
+ 0x78,
+ 0xf0fc0040,
+ HasLWake,
+ },
+
+ { "RTL-8130",
+ 0x7C,
+ 0xf0fe0040, /* XXX copied from RTL8139A, verify */
+ HasLWake,
+ },
+
+ { "RTL-8139C",
+ 0x74,
+ 0xf0fc0040, /* XXX copied from RTL8139B, verify */
+ HasLWake,
+ },
+
+};
+
+struct rtl_extra_stats {
+ unsigned long early_rx;
+ unsigned long tx_buf_mapped;
+ unsigned long tx_timeouts;
+ unsigned long rx_lost_in_ring;
+};
+
+/* Per-board private state, hung off net_device->priv (allocated together
+ * with the net_device by alloc_etherdev). */
+struct rtl8139_private {
+	void *mmio_addr;	/* register window base (MMIO, or PIO port if USE_IO_OPS) */
+	int drv_flags;		/* board_info[].hw_flags for this board */
+	struct pci_dev *pci_dev;	/* owning PCI device */
+	struct net_device_stats stats;
+	unsigned char *rx_ring;	/* single contiguous Rx ring buffer */
+	unsigned int cur_rx;	/* Index into the Rx buffer of next Rx pkt. */
+	unsigned int tx_flag;
+	unsigned long cur_tx;	/* next Tx descriptor to fill */
+	unsigned long dirty_tx;	/* first Tx descriptor not yet reclaimed */
+	unsigned char *tx_buf[NUM_TX_DESC];	/* Tx bounce buffers */
+	unsigned char *tx_bufs;	/* Tx bounce buffer region. */
+	dma_addr_t rx_ring_dma;	/* bus address of rx_ring */
+	dma_addr_t tx_bufs_dma;	/* bus address of tx_bufs */
+	signed char phys[4];		/* MII device addresses. */
+	char twistie, twist_row, twist_col;	/* Twister tune state. */
+	unsigned int default_port:4;	/* Last dev->if_port value. */
+	spinlock_t lock;	/* protects chip register access */
+	chip_t chipset;		/* index into rtl_chip_info[] */
+#if 0
+	pid_t thr_pid;
+	wait_queue_head_t thr_wait;
+	struct completion thr_exited;
+#else
+	/* timer replaces the kernel-thread based link monitor in this port */
+	struct timer_list timer;
+#endif
+	u32 rx_config;		/* cached RxConfig register value */
+	struct rtl_extra_stats xstats;
+	int time_to_die;
+	struct mii_if_info mii;	/* generic MII glue */
+	unsigned int regs_len;	/* size of the claimed register region */
+};
+
+MODULE_AUTHOR ("Jeff Garzik <jgarzik@pobox.com>");
+MODULE_DESCRIPTION ("RealTek RTL-8139 Fast Ethernet driver");
+MODULE_LICENSE("GPL");
+
+MODULE_PARM (multicast_filter_limit, "i");
+MODULE_PARM (max_interrupt_work, "i");
+MODULE_PARM (media, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM (full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM (debug, "i");
+MODULE_PARM_DESC (debug, "8139too bitmapped message enable number");
+MODULE_PARM_DESC (multicast_filter_limit, "8139too maximum number of filtered multicast addresses");
+MODULE_PARM_DESC (max_interrupt_work, "8139too maximum events handled per interrupt");
+MODULE_PARM_DESC (media, "8139too: Bits 4+9: force full duplex, bit 5: 100Mbps");
+MODULE_PARM_DESC (full_duplex, "8139too: Force full duplex for board(s) (1)");
+
+static int read_eeprom (void *ioaddr, int location, int addr_len);
+static int rtl8139_open (struct net_device *dev);
+static int mdio_read (struct net_device *dev, int phy_id, int location);
+static void mdio_write (struct net_device *dev, int phy_id, int location,
+ int val);
+#if 0
+static int rtl8139_thread (void *data);
+#else
+static void rtl8139_timer (unsigned long arg);
+#endif
+static void rtl8139_tx_timeout (struct net_device *dev);
+static void rtl8139_init_ring (struct net_device *dev);
+static int rtl8139_start_xmit (struct sk_buff *skb,
+ struct net_device *dev);
+static void rtl8139_interrupt (int irq, void *dev_instance,
+ struct pt_regs *regs);
+static int rtl8139_close (struct net_device *dev);
+#if 0
+static int netdev_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
+#endif
+static struct net_device_stats *rtl8139_get_stats (struct net_device *dev);
+static void rtl8139_set_rx_mode (struct net_device *dev);
+static void __set_rx_mode (struct net_device *dev);
+static void rtl8139_hw_start (struct net_device *dev);
+
+#ifdef USE_IO_OPS
+
+#define RTL_R8(reg) inb (((unsigned long)ioaddr) + (reg))
+#define RTL_R16(reg) inw (((unsigned long)ioaddr) + (reg))
+#define RTL_R32(reg) ((unsigned long) inl (((unsigned long)ioaddr) + (reg)))
+#define RTL_W8(reg, val8) outb ((val8), ((unsigned long)ioaddr) + (reg))
+#define RTL_W16(reg, val16) outw ((val16), ((unsigned long)ioaddr) + (reg))
+#define RTL_W32(reg, val32) outl ((val32), ((unsigned long)ioaddr) + (reg))
+#define RTL_W8_F RTL_W8
+#define RTL_W16_F RTL_W16
+#define RTL_W32_F RTL_W32
+#undef readb
+#undef readw
+#undef readl
+#undef writeb
+#undef writew
+#undef writel
+#define readb(addr) inb((unsigned long)(addr))
+#define readw(addr) inw((unsigned long)(addr))
+#define readl(addr) inl((unsigned long)(addr))
+#define writeb(val,addr) outb((val),(unsigned long)(addr))
+#define writew(val,addr) outw((val),(unsigned long)(addr))
+#define writel(val,addr) outl((val),(unsigned long)(addr))
+
+#else
+
+/* write MMIO register, with flush */
+/* Flush avoids rtl8139 bug w/ posted MMIO writes */
+#define RTL_W8_F(reg, val8) do { writeb ((val8), ioaddr + (reg)); readb (ioaddr + (reg)); } while (0)
+#define RTL_W16_F(reg, val16) do { writew ((val16), ioaddr + (reg)); readw (ioaddr + (reg)); } while (0)
+#define RTL_W32_F(reg, val32) do { writel ((val32), ioaddr + (reg)); readl (ioaddr + (reg)); } while (0)
+
+
+#define MMIO_FLUSH_AUDIT_COMPLETE 1
+#if MMIO_FLUSH_AUDIT_COMPLETE
+
+/* write MMIO register */
+#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
+#define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
+#define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
+
+#else
+
+/* write MMIO register, then flush */
+#define RTL_W8 RTL_W8_F
+#define RTL_W16 RTL_W16_F
+#define RTL_W32 RTL_W32_F
+
+#endif /* MMIO_FLUSH_AUDIT_COMPLETE */
+
+/* read MMIO register */
+#define RTL_R8(reg) readb (ioaddr + (reg))
+#define RTL_R16(reg) readw (ioaddr + (reg))
+#define RTL_R32(reg) ((unsigned long) readl (ioaddr + (reg)))
+
+#endif /* USE_IO_OPS */
+
+
+static const u16 rtl8139_intr_mask =
+ PCIErr | PCSTimeout | RxUnderrun | RxOverflow | RxFIFOOver |
+ TxErr | TxOK | RxErr | RxOK;
+
+static const unsigned int rtl8139_rx_config =
+ RxCfgRcv32K | RxNoWrap |
+ (RX_FIFO_THRESH << RxCfgFIFOShift) |
+ (RX_DMA_BURST << RxCfgDMAShift);
+
+static const unsigned int rtl8139_tx_config =
+ (TX_DMA_BURST << TxDMAShift) | (TX_RETRY << TxRetryShift);
+
+/* Tear down everything rtl8139_init_board() acquired for one board:
+ * unmap the MMIO window (if any), release the PCI regions, and free the
+ * net_device (which carries the rtl8139_private appended by
+ * alloc_etherdev).  Safe to call from any failure point of board init. */
+static void __rtl8139_cleanup_dev (struct net_device *dev)
+{
+	struct rtl8139_private *tp;
+	struct pci_dev *pdev;
+
+	assert (dev != NULL);
+	assert (dev->priv != NULL);
+
+	tp = dev->priv;
+	assert (tp->pci_dev != NULL);
+	pdev = tp->pci_dev;
+
+#ifndef USE_IO_OPS
+	if (tp->mmio_addr)
+		iounmap (tp->mmio_addr);
+#endif /* !USE_IO_OPS */
+
+	/* it's ok to call this even if we have no regions to free */
+	pci_release_regions (pdev);
+
+#ifndef RTL8139_NDEBUG
+	/* poison memory before freeing, to catch use-after-free */
+	memset (dev, 0xBC,
+		sizeof (struct net_device) +
+		sizeof (struct rtl8139_private));
+#endif /* RTL8139_NDEBUG */
+
+	kfree (dev);
+
+	pci_set_drvdata (pdev, NULL);
+}
+
+
+/* Issue a soft reset via the ChipCmd register and poll (up to 1000
+ * iterations x 10us = ~10ms) until the hardware clears the CmdReset
+ * bit to signal completion; gives up silently on timeout. */
+static void rtl8139_chip_reset (void *ioaddr)
+{
+	int i;
+
+	/* Soft reset the chip. */
+	RTL_W8 (ChipCmd, CmdReset);
+
+	/* Check that the chip has finished the reset. */
+	for (i = 1000; i > 0; i--) {
+		barrier();
+		if ((RTL_R8 (ChipCmd) & CmdReset) == 0)
+			break;
+		udelay (10);
+	}
+}
+
+
+/* Probe-time board initialisation: allocate the net_device, enable the
+ * PCI device, sanity-check and claim its I/O resources, map the register
+ * window (PIO or MMIO depending on USE_IO_OPS), bring the chip out of
+ * low-power mode, identify the silicon revision, and soft-reset it.
+ *
+ * On success returns 0 with the new device stored in *dev_out; on any
+ * failure everything is torn down via __rtl8139_cleanup_dev() and a
+ * negative errno is returned. */
+static int __devinit rtl8139_init_board (struct pci_dev *pdev,
+					 struct net_device **dev_out)
+{
+	void *ioaddr;
+	struct net_device *dev;
+	struct rtl8139_private *tp;
+	u8 tmp8;
+	int rc;
+	unsigned int i;
+	u32 pio_start, pio_end, pio_flags, pio_len;
+	unsigned long mmio_start, mmio_end, mmio_flags, mmio_len;
+	u32 tmp;
+
+	assert (pdev != NULL);
+
+	*dev_out = NULL;
+
+	/* dev and dev->priv zeroed in alloc_etherdev */
+	dev = alloc_etherdev (sizeof (*tp));
+	if (dev == NULL) {
+		printk (KERN_ERR PFX "%s: Unable to alloc new net device\n", pdev->slot_name);
+		return -ENOMEM;
+	}
+	SET_MODULE_OWNER(dev);
+	tp = dev->priv;
+	tp->pci_dev = pdev;
+
+	/* enable device (incl. PCI PM wakeup and hotplug setup) */
+	rc = pci_enable_device (pdev);
+	if (rc)
+		goto err_out;
+
+	pio_start = pci_resource_start (pdev, 0);
+	pio_end = pci_resource_end (pdev, 0);
+	pio_flags = pci_resource_flags (pdev, 0);
+	pio_len = pci_resource_len (pdev, 0);
+
+	mmio_start = pci_resource_start (pdev, 1);
+	mmio_end = pci_resource_end (pdev, 1);
+	mmio_flags = pci_resource_flags (pdev, 1);
+	mmio_len = pci_resource_len (pdev, 1);
+
+	/* set this immediately, we need to know before
+	 * we talk to the chip directly */
+	DPRINTK("PIO region size == 0x%02X\n", pio_len);
+	DPRINTK("MMIO region size == 0x%02lX\n", mmio_len);
+
+#ifdef USE_IO_OPS
+	/* make sure PCI base addr 0 is PIO */
+	if (!(pio_flags & IORESOURCE_IO)) {
+		printk (KERN_ERR PFX "%s: region #0 not a PIO resource, aborting\n", pdev->slot_name);
+		rc = -ENODEV;
+		goto err_out;
+	}
+	/* check for weird/broken PCI region reporting */
+	if (pio_len < RTL_MIN_IO_SIZE) {
+		printk (KERN_ERR PFX "%s: Invalid PCI I/O region size(s), aborting\n", pdev->slot_name);
+		rc = -ENODEV;
+		goto err_out;
+	}
+#else
+	/* make sure PCI base addr 1 is MMIO */
+	if (!(mmio_flags & IORESOURCE_MEM)) {
+		printk (KERN_ERR PFX "%s: region #1 not an MMIO resource, aborting\n", pdev->slot_name);
+		rc = -ENODEV;
+		goto err_out;
+	}
+	if (mmio_len < RTL_MIN_IO_SIZE) {
+		printk (KERN_ERR PFX "%s: Invalid PCI mem region size(s), aborting\n", pdev->slot_name);
+		rc = -ENODEV;
+		goto err_out;
+	}
+#endif
+
+	rc = pci_request_regions (pdev, "8139too");
+	if (rc)
+		goto err_out;
+
+	/* enable PCI bus-mastering */
+	pci_set_master (pdev);
+
+#ifdef USE_IO_OPS
+	ioaddr = (void *) pio_start;
+	dev->base_addr = pio_start;
+	tp->mmio_addr = ioaddr;
+	tp->regs_len = pio_len;
+#else
+	/* ioremap MMIO region */
+	ioaddr = ioremap (mmio_start, mmio_len);
+	if (ioaddr == NULL) {
+		printk (KERN_ERR PFX "%s: cannot remap MMIO, aborting\n", pdev->slot_name);
+		rc = -EIO;
+		goto err_out;
+	}
+	dev->base_addr = (long) ioaddr;
+	tp->mmio_addr = ioaddr;
+	tp->regs_len = mmio_len;
+#endif /* USE_IO_OPS */
+
+	/* Bring old chips out of low-power mode. */
+	RTL_W8 (HltClk, 'R');
+
+	/* check for missing/broken hardware */
+	if (RTL_R32 (TxConfig) == 0xFFFFFFFF) {
+		printk (KERN_ERR PFX "%s: Chip not responding, ignoring board\n",
+			pdev->slot_name);
+		rc = -EIO;
+		goto err_out;
+	}
+
+	/* identify chip attached to board */
+	tmp = RTL_R8 (ChipVersion);
+	for (i = 0; i < ARRAY_SIZE (rtl_chip_info); i++)
+		if (tmp == rtl_chip_info[i].version) {
+			tp->chipset = i;
+			goto match;
+		}
+
+	/* if unknown chip, assume array element #0, original RTL-8139 in this case */
+	printk (KERN_DEBUG PFX "%s: unknown chip version, assuming RTL-8139\n",
+		pdev->slot_name);
+	printk (KERN_DEBUG PFX "%s: TxConfig = 0x%lx\n", pdev->slot_name, RTL_R32 (TxConfig));
+	tp->chipset = 0;
+
+match:
+	DPRINTK ("chipset id (%d) == index %d, '%s'\n",
+		tmp,
+		tp->chipset,
+		rtl_chip_info[tp->chipset].name);
+
+	if (tp->chipset >= CH_8139B) {
+		u8 new_tmp8 = tmp8 = RTL_R8 (Config1);
+		DPRINTK("PCI PM wakeup\n");
+		if ((rtl_chip_info[tp->chipset].flags & HasLWake) &&
+		    (tmp8 & LWAKE))
+			new_tmp8 &= ~LWAKE;
+		new_tmp8 |= Cfg1_PM_Enable;
+		if (new_tmp8 != tmp8) {
+			RTL_W8 (Cfg9346, Cfg9346_Unlock);
+			/* Write the *modified* value; writing back the old
+			 * tmp8 here would make the LWAKE/PM fixup a no-op. */
+			RTL_W8 (Config1, new_tmp8);
+			RTL_W8 (Cfg9346, Cfg9346_Lock);
+		}
+		if (rtl_chip_info[tp->chipset].flags & HasLWake) {
+			tmp8 = RTL_R8 (Config4);
+			if (tmp8 & LWPTN)
+				RTL_W8 (Config4, tmp8 & ~LWPTN);
+		}
+	} else {
+		DPRINTK("Old chip wakeup\n");
+		tmp8 = RTL_R8 (Config1);
+		tmp8 &= ~(SLEEP | PWRDN);
+		RTL_W8 (Config1, tmp8);
+	}
+
+	rtl8139_chip_reset (ioaddr);
+
+	*dev_out = dev;
+	return 0;
+
+err_out:
+	__rtl8139_cleanup_dev (dev);
+	return rc;
+}
+
+
+static int __devinit rtl8139_init_one (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev = NULL;
+ struct rtl8139_private *tp;
+ int i, addr_len, option;
+ void *ioaddr;
+ static int board_idx = -1;
+ u8 pci_rev;
+
+ assert (pdev != NULL);
+ assert (ent != NULL);
+
+ board_idx++;
+
+ /* when we're built into the kernel, the driver version message
+ * is only printed if at least one 8139 board has been found
+ */
+#ifndef MODULE
+ {
+ static int printed_version;
+ if (!printed_version++)
+ printk (KERN_INFO RTL8139_DRIVER_NAME "\n");
+ }
+#endif
+
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);
+
+ if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
+ pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pci_rev >= 0x20) {
+ printk(KERN_INFO PFX "pci dev %s (id %04x:%04x rev %02x) is an enhanced 8139C+ chip\n",
+ pdev->slot_name, pdev->vendor, pdev->device, pci_rev);
+ printk(KERN_INFO PFX "Use the \"8139cp\" driver for improved performance and stability.\n");
+ }
+
+ i = rtl8139_init_board (pdev, &dev);
+ if (i < 0)
+ return i;
+
+ tp = dev->priv;
+ ioaddr = tp->mmio_addr;
+
+ assert (ioaddr != NULL);
+ assert (dev != NULL);
+ assert (tp != NULL);
+
+ addr_len = read_eeprom (ioaddr, 0, 8) == 0x8129 ? 8 : 6;
+ for (i = 0; i < 3; i++)
+ ((u16 *) (dev->dev_addr))[i] =
+ le16_to_cpu (read_eeprom (ioaddr, i + 7, addr_len));
+
+ /* The Rtl8139-specific entries in the device structure. */
+ dev->open = rtl8139_open;
+ dev->hard_start_xmit = rtl8139_start_xmit;
+ dev->stop = rtl8139_close;
+ dev->get_stats = rtl8139_get_stats;
+ dev->set_multicast_list = rtl8139_set_rx_mode;
+#if 0
+ dev->do_ioctl = netdev_ioctl;
+#endif
+ dev->tx_timeout = rtl8139_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ /* note: the hardware is not capable of sg/csum/highdma, however
+ * through the use of skb_copy_and_csum_dev we enable these
+ * features
+ */
+ dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA;
+
+ dev->irq = pdev->irq;
+
+ /* dev->priv/tp zeroed and aligned in init_etherdev */
+ tp = dev->priv;
+
+ /* note: tp->chipset set in rtl8139_init_board */
+ tp->drv_flags = board_info[ent->driver_data].hw_flags;
+ tp->mmio_addr = ioaddr;
+ spin_lock_init (&tp->lock);
+#if 0
+ init_waitqueue_head (&tp->thr_wait);
+ init_completion (&tp->thr_exited);
+#endif
+ tp->mii.dev = dev;
+ tp->mii.mdio_read = mdio_read;
+ tp->mii.mdio_write = mdio_write;
+ tp->mii.phy_id_mask = 0x3f;
+ tp->mii.reg_num_mask = 0x1f;
+
+ /* dev is fully set up and ready to use now */
+ DPRINTK("about to register device named %s (%p)...\n", dev->name, dev);
+ i = register_netdev (dev);
+ if (i) goto err_out;
+
+ pci_set_drvdata (pdev, dev);
+
+ printk (KERN_INFO "%s: %s at 0x%lx, "
+ "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, "
+ "IRQ %d\n",
+ dev->name,
+ board_info[ent->driver_data].name,
+ dev->base_addr,
+ dev->dev_addr[0], dev->dev_addr[1],
+ dev->dev_addr[2], dev->dev_addr[3],
+ dev->dev_addr[4], dev->dev_addr[5],
+ dev->irq);
+
+ printk (KERN_DEBUG "%s: Identified 8139 chip type '%s'\n",
+ dev->name, rtl_chip_info[tp->chipset].name);
+
+ /* Find the connected MII xcvrs.
+ Doing this in open() would allow detecting external xcvrs later, but
+ takes too much time. */
+#ifdef CONFIG_8139TOO_8129
+ if (tp->drv_flags & HAS_MII_XCVR) {
+ int phy, phy_idx = 0;
+ for (phy = 0; phy < 32 && phy_idx < sizeof(tp->phys); phy++) {
+ int mii_status = mdio_read(dev, phy, 1);
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ u16 advertising = mdio_read(dev, phy, 4);
+ tp->phys[phy_idx++] = phy;
+ printk(KERN_INFO "%s: MII transceiver %d status 0x%4.4x "
+ "advertising %4.4x.\n",
+ dev->name, phy, mii_status, advertising);
+ }
+ }
+ if (phy_idx == 0) {
+ printk(KERN_INFO "%s: No MII transceivers found! Assuming SYM "
+ "transceiver.\n",
+ dev->name);
+ tp->phys[0] = 32;
+ }
+ } else
+#endif
+ tp->phys[0] = 32;
+ tp->mii.phy_id = tp->phys[0];
+
+ /* The lower four bits are the media type. */
+ option = (board_idx >= MAX_UNITS) ? 0 : media[board_idx];
+ if (option > 0) {
+ tp->mii.full_duplex = (option & 0x210) ? 1 : 0;
+ tp->default_port = option & 0xFF;
+ if (tp->default_port)
+ tp->mii.force_media = 1;
+ }
+ if (board_idx < MAX_UNITS && full_duplex[board_idx] > 0)
+ tp->mii.full_duplex = full_duplex[board_idx];
+ if (tp->mii.full_duplex) {
+ printk(KERN_INFO "%s: Media type forced to Full Duplex.\n", dev->name);
+ /* Changing the MII-advertised media because might prevent
+ re-connection. */
+ tp->mii.force_media = 1;
+ }
+ if (tp->default_port) {
+ printk(KERN_INFO " Forcing %dMbps %s-duplex operation.\n",
+ (option & 0x20 ? 100 : 10),
+ (option & 0x10 ? "full" : "half"));
+ mdio_write(dev, tp->phys[0], 0,
+ ((option & 0x20) ? 0x2000 : 0) | /* 100Mbps? */
+ ((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */
+ }
+
+ /* Put the chip into low-power mode. */
+ if (rtl_chip_info[tp->chipset].flags & HasHltClk)
+ RTL_W8 (HltClk, 'H'); /* 'R' would leave the clock running. */
+
+ alert_slow_netdevice(dev, (char *)board_info[ent->driver_data].name);
+
+ return 0;
+
+err_out:
+ __rtl8139_cleanup_dev (dev);
+ return i;
+}
+
+
+/* PCI driver-unload / hot-unplug hook: detach the interface from the
+ * network stack, then release all per-device resources (MMIO mapping,
+ * PCI regions, netdev) via __rtl8139_cleanup_dev(). */
+static void __devexit rtl8139_remove_one (struct pci_dev *pdev)
+{
+    struct net_device *dev = pci_get_drvdata (pdev);
+    struct rtl8139_private *np;
+
+    assert (dev != NULL);
+    np = dev->priv;          /* fetched only to sanity-check priv != NULL */
+    assert (np != NULL);
+
+    unregister_netdev (dev);
+
+    __rtl8139_cleanup_dev (dev);
+}
+
+
+/* Serial EEPROM section. */
+
+/* EEPROM_Ctrl bits, all in the Cfg9346 register, used to bit-bang the
+ * 93C46/93C56 serial EEPROM that stores the MAC address. */
+#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
+#define EE_CS 0x08 /* EEPROM chip select. */
+#define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */
+#define EE_WRITE_0 0x00
+#define EE_WRITE_1 0x02
+#define EE_DATA_READ 0x01 /* EEPROM chip data out. */
+#define EE_ENB (0x80 | EE_CS) /* programming mode + chip select */
+
+/* Delay between EEPROM clock transitions.
+   No extra delay is needed with 33Mhz PCI, but 66Mhz may change this.
+ */
+
+/* A dummy MMIO read serves as the delay; 'ee_addr' must be in scope. */
+#define eeprom_delay() readl(ee_addr)
+
+/* The EEPROM commands include the alway-set leading bit. */
+#define EE_WRITE_CMD (5)
+#define EE_READ_CMD (6)
+#define EE_ERASE_CMD (7)
+
+/* Read one 16-bit word from the serial EEPROM behind Cfg9346 by
+ * bit-banging a read command and clocking the result back in.
+ * 'addr_len' is the number of address bits: 6 (93C46) or 8 (93C56). */
+static int __devinit read_eeprom (void *ioaddr, int location, int addr_len)
+{
+    int i;
+    unsigned retval = 0;
+    void *ee_addr = ioaddr + Cfg9346;
+    int read_cmd = location | (EE_READ_CMD << addr_len);
+
+    /* Raise chip select to begin the transaction. */
+    writeb (EE_ENB & ~EE_CS, ee_addr);
+    writeb (EE_ENB, ee_addr);
+    eeprom_delay ();
+
+    /* Shift the read command bits out. */
+    for (i = 4 + addr_len; i >= 0; i--) {
+        int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
+        writeb (EE_ENB | dataval, ee_addr);
+        eeprom_delay ();
+        /* Clock the bit in on the rising edge. */
+        writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
+        eeprom_delay ();
+    }
+    writeb (EE_ENB, ee_addr);
+    eeprom_delay ();
+
+    /* Clock the 16 data bits back in, MSB first. */
+    for (i = 16; i > 0; i--) {
+        writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
+        eeprom_delay ();
+        retval =
+            (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
+                     0);
+        writeb (EE_ENB, ee_addr);
+        eeprom_delay ();
+    }
+
+    /* Terminate the EEPROM access. */
+    writeb (~EE_CS, ee_addr);
+    eeprom_delay ();
+
+    return retval;
+}
+
+/* MII serial management: mostly bogus for now. */
+/* Read and write the MII management registers using software-generated
+   serial MDIO protocol.
+   The maximum data clock rate is 2.5 Mhz. The minimum timing is usually
+   met by back-to-back PCI I/O cycles, but we insert a delay to avoid
+   "overclocking" issues. */
+/* Bit positions within the Config4 register used for bit-banged MDIO
+ * (only relevant for RTL8129 boards with external transceivers). */
+#define MDIO_DIR 0x80
+#define MDIO_DATA_OUT 0x04
+#define MDIO_DATA_IN 0x02
+#define MDIO_CLK 0x01
+#define MDIO_WRITE0 (MDIO_DIR)
+#define MDIO_WRITE1 (MDIO_DIR | MDIO_DATA_OUT)
+
+/* Dummy MMIO read acts as the inter-transition delay. */
+#define mdio_delay(mdio_addr) readb(mdio_addr)
+
+
+/* Map MII register numbers 0..7 to the 8139's equivalent internal
+ * register offsets; a zero entry means "no equivalent register". */
+static char mii_2_8139_map[8] = {
+    BasicModeCtrl,
+    BasicModeStatus,
+    0,
+    0,
+    NWayAdvert,
+    NWayLPAR,
+    NWayExpansion,
+    0
+};
+
+
+#ifdef CONFIG_8139TOO_8129
+/* Syncronize the MII management interface by shifting 32 one bits out. */
+/* (The loop below actually emits 33 clock pulses — i runs 32..0 — which
+ * is harmless for preamble purposes.) */
+static void mdio_sync (void *mdio_addr)
+{
+    int i;
+
+    for (i = 32; i >= 0; i--) {
+        writeb (MDIO_WRITE1, mdio_addr);
+        mdio_delay (mdio_addr);
+        writeb (MDIO_WRITE1 | MDIO_CLK, mdio_addr);
+        mdio_delay (mdio_addr);
+    }
+}
+#endif
+
+/* Read an MII management register.  phy_id > 31 means "the internal
+ * 8139 PHY": read via mii_2_8139_map instead of real MDIO.  Real
+ * bit-banged MDIO (through Config4) is only compiled in for 8129. */
+static int mdio_read (struct net_device *dev, int phy_id, int location)
+{
+    struct rtl8139_private *tp = dev->priv;
+    int retval = 0;
+#ifdef CONFIG_8139TOO_8129
+    void *mdio_addr = tp->mmio_addr + Config4;
+    int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+    int i;
+#endif
+
+    if (phy_id > 31) {    /* Really a 8139.  Use internal registers. */
+        /* Unmapped MII registers read as 0. */
+        return location < 8 && mii_2_8139_map[location] ?
+            readw (tp->mmio_addr + mii_2_8139_map[location]) : 0;
+    }
+
+#ifdef CONFIG_8139TOO_8129
+    mdio_sync (mdio_addr);
+    /* Shift the read command bits out. */
+    for (i = 15; i >= 0; i--) {
+        int dataval = (mii_cmd & (1 << i)) ? MDIO_DATA_OUT : 0;
+
+        writeb (MDIO_DIR | dataval, mdio_addr);
+        mdio_delay (mdio_addr);
+        writeb (MDIO_DIR | dataval | MDIO_CLK, mdio_addr);
+        mdio_delay (mdio_addr);
+    }
+
+    /* Read the two transition, 16 data, and wire-idle bits. */
+    for (i = 19; i > 0; i--) {
+        writeb (0, mdio_addr);
+        mdio_delay (mdio_addr);
+        retval = (retval << 1) | ((readb (mdio_addr) & MDIO_DATA_IN) ? 1 : 0);
+        writeb (MDIO_CLK, mdio_addr);
+        mdio_delay (mdio_addr);
+    }
+#endif
+
+    /* Drop the trailing wire-idle bit and mask to 16 data bits. */
+    return (retval >> 1) & 0xffff;
+}
+
+
+/* Write an MII management register.  phy_id > 31 means "the internal
+ * 8139 PHY": writes go to the mapped internal register; register 0
+ * (BMCR) additionally requires unlocking Cfg9346 around the write. */
+static void mdio_write (struct net_device *dev, int phy_id, int location,
+            int value)
+{
+    struct rtl8139_private *tp = dev->priv;
+#ifdef CONFIG_8139TOO_8129
+    void *mdio_addr = tp->mmio_addr + Config4;
+    int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
+    int i;
+#endif
+
+    if (phy_id > 31) {    /* Really a 8139.  Use internal registers. */
+        void *ioaddr = tp->mmio_addr;
+        if (location == 0) {
+            /* BMCR writes only take effect with config unlocked. */
+            RTL_W8 (Cfg9346, Cfg9346_Unlock);
+            RTL_W16 (BasicModeCtrl, value);
+            RTL_W8 (Cfg9346, Cfg9346_Lock);
+        } else if (location < 8 && mii_2_8139_map[location])
+            RTL_W16 (mii_2_8139_map[location], value);
+        /* Writes to unmapped MII registers are silently discarded. */
+        return;
+    }
+
+#ifdef CONFIG_8139TOO_8129
+    mdio_sync (mdio_addr);
+
+    /* Shift the command bits out. */
+    for (i = 31; i >= 0; i--) {
+        int dataval =
+            (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+        writeb (dataval, mdio_addr);
+        mdio_delay (mdio_addr);
+        writeb (dataval | MDIO_CLK, mdio_addr);
+        mdio_delay (mdio_addr);
+    }
+    /* Clear out extra bits. */
+    for (i = 2; i > 0; i--) {
+        writeb (0, mdio_addr);
+        mdio_delay (mdio_addr);
+        writeb (MDIO_CLK, mdio_addr);
+        mdio_delay (mdio_addr);
+    }
+#endif
+}
+
+
+/* net_device open hook: grab the (shared) IRQ, allocate the DMA-coherent
+ * Tx buffer area and Rx ring, program the chip, and start the periodic
+ * maintenance timer.  Returns 0 or a negative errno. */
+static int rtl8139_open (struct net_device *dev)
+{
+    struct rtl8139_private *tp = dev->priv;
+    int retval;
+#ifdef RTL8139_DEBUG
+    void *ioaddr = tp->mmio_addr;    /* only needed by the DPRINTK below */
+#endif
+
+    retval = request_irq (dev->irq, rtl8139_interrupt, SA_SHIRQ, dev->name, dev);
+    if (retval)
+        return retval;
+
+    tp->tx_bufs = pci_alloc_consistent(tp->pci_dev, TX_BUF_TOT_LEN,
+                       &tp->tx_bufs_dma);
+    tp->rx_ring = pci_alloc_consistent(tp->pci_dev, RX_BUF_TOT_LEN,
+                       &tp->rx_ring_dma);
+    if (tp->tx_bufs == NULL || tp->rx_ring == NULL) {
+        /* Either allocation failed: undo the IRQ and whichever
+         * buffer did get allocated, then bail out. */
+        free_irq(dev->irq, dev);
+
+        if (tp->tx_bufs)
+            pci_free_consistent(tp->pci_dev, TX_BUF_TOT_LEN,
+                        tp->tx_bufs, tp->tx_bufs_dma);
+        if (tp->rx_ring)
+            pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN,
+                        tp->rx_ring, tp->rx_ring_dma);
+
+        return -ENOMEM;
+
+    }
+
+    tp->mii.full_duplex = tp->mii.force_media;
+    tp->tx_flag = (TX_FIFO_THRESH << 11) & 0x003f0000;
+    tp->twistie = (tp->chipset == CH_8139_K) ? 1 : 0;
+    tp->time_to_die = 0;
+
+    rtl8139_init_ring (dev);
+    rtl8139_hw_start (dev);
+
+    DPRINTK ("%s: rtl8139_open() ioaddr %#lx IRQ %d"
+            " GP Pins %2.2x %s-duplex.\n",
+            dev->name, pci_resource_start (tp->pci_dev, 1),
+            dev->irq, RTL_R8 (MediaStatus),
+            tp->mii.full_duplex ? "full" : "half");
+
+#if 0
+    tp->thr_pid = kernel_thread (rtl8139_thread, dev, CLONE_FS | CLONE_FILES);
+    if (tp->thr_pid < 0)
+        printk (KERN_WARNING "%s: unable to start kernel thread\n",
+            dev->name);
+#else
+    /* Xen port: periodic timer replaces the maintenance kernel thread. */
+    init_timer(&tp->timer);
+    tp->timer.expires = jiffies + next_tick;
+    tp->timer.data = (unsigned long)dev;
+    tp->timer.function = rtl8139_timer;
+    add_timer(&tp->timer);
+#endif
+
+    return 0;
+}
+
+
+/* Inspect the link partner's auto-negotiation ability (MII LPA) and set
+ * full duplex if 100FULL or 10FULL was negotiated. */
+static void rtl_check_media (struct net_device *dev)
+{
+    struct rtl8139_private *tp = dev->priv;
+
+    if (tp->phys[0] >= 0) {
+        u16 mii_lpa = mdio_read(dev, tp->phys[0], MII_LPA);
+        if (mii_lpa == 0xffff)
+            ;                    /* Not there */
+        else if ((mii_lpa & LPA_100FULL) == LPA_100FULL
+             || (mii_lpa & 0x00C0) == LPA_10FULL)
+            tp->mii.full_duplex = 1;
+
+        printk (KERN_INFO"%s: Setting %s%s-duplex based on"
+            " auto-negotiated partner ability %4.4x.\n",
+                dev->name, mii_lpa == 0 ? "" :
+            (mii_lpa & 0x0180) ? "100mbps " : "10mbps ",
+            tp->mii.full_duplex ? "full" : "half", mii_lpa);
+    }
+}
+
+/* Start the hardware at open or resume. */
+/* Full chip (re)initialization: wake from low-power, soft reset, restore
+ * the MAC address, enable Tx/Rx, program transfer thresholds and the
+ * ring/buffer DMA addresses, then unmask interrupts and start the queue.
+ * Also called from rtl8139_tx_timeout to recover the chip.  Several of
+ * these writes are order-sensitive (see inline comments). */
+static void rtl8139_hw_start (struct net_device *dev)
+{
+    struct rtl8139_private *tp = dev->priv;
+    void *ioaddr = tp->mmio_addr;
+    u32 i;
+    u8 tmp;
+
+    /* Bring old chips out of low-power mode. */
+    if (rtl_chip_info[tp->chipset].flags & HasHltClk)
+        RTL_W8 (HltClk, 'R');
+
+    rtl8139_chip_reset (ioaddr);
+
+    /* unlock Config[01234] and BMCR register writes */
+    RTL_W8_F (Cfg9346, Cfg9346_Unlock);
+    /* Restore our idea of the MAC address. */
+    RTL_W32_F (MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0)));
+    RTL_W32_F (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4)));
+
+    /* Must enable Tx/Rx before setting transfer thresholds! */
+    RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);
+
+    tp->rx_config = rtl8139_rx_config | AcceptBroadcast | AcceptMyPhys;
+    RTL_W32 (RxConfig, tp->rx_config);
+
+    /* Check this value: the documentation for IFG contradicts ifself. */
+    RTL_W32 (TxConfig, rtl8139_tx_config);
+
+    tp->cur_rx = 0;
+
+    rtl_check_media (dev);
+
+    if (tp->chipset >= CH_8139B) {
+        /* Disable magic packet scanning, which is enabled
+         * when PM is enabled in Config1.  It can be reenabled
+         * via ETHTOOL_SWOL if desired.  */
+        RTL_W8 (Config3, RTL_R8 (Config3) & ~Cfg3_Magic);
+    }
+
+    DPRINTK("init buffer addresses\n");
+
+    /* Lock Config[01234] and BMCR register writes */
+    RTL_W8 (Cfg9346, Cfg9346_Lock);
+
+    /* init Rx ring buffer DMA address */
+    RTL_W32_F (RxBuf, tp->rx_ring_dma);
+
+    /* init Tx buffer DMA addresses */
+    for (i = 0; i < NUM_TX_DESC; i++)
+        RTL_W32_F (TxAddr0 + (i * 4), tp->tx_bufs_dma + (tp->tx_buf[i] - tp->tx_bufs));
+
+    RTL_W32 (RxMissed, 0);
+
+    rtl8139_set_rx_mode (dev);
+
+    /* no early-rx interrupts */
+    RTL_W16 (MultiIntr, RTL_R16 (MultiIntr) & MultiIntrClear);
+
+    /* make sure RxTx has started */
+    tmp = RTL_R8 (ChipCmd);
+    if ((!(tmp & CmdRxEnb)) || (!(tmp & CmdTxEnb)))
+        RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);
+
+    /* Enable all known interrupts by setting the interrupt mask. */
+    RTL_W16 (IntrMask, rtl8139_intr_mask);
+
+    netif_start_queue (dev);
+}
+
+
+/* Reset all software ring state: rewind the Rx read offset and both Tx
+ * cursors, and carve the contiguous Tx buffer area into NUM_TX_DESC
+ * fixed-size slots. */
+static void rtl8139_init_ring (struct net_device *dev)
+{
+    struct rtl8139_private *tp = dev->priv;
+    int slot;
+
+    tp->cur_rx = 0;
+    tp->cur_tx = 0;
+    tp->dirty_tx = 0;
+
+    for (slot = 0; slot < NUM_TX_DESC; slot++)
+        tp->tx_buf[slot] = tp->tx_bufs + (slot * TX_BUF_SIZE);
+}
+
+
+#ifndef CONFIG_8139TOO_TUNE_TWISTER
+/* Twister tuning compiled out: no-op stub. */
+static inline void rtl8139_tune_twister (struct net_device *dev,
+                  struct rtl8139_private *tp) {}
+#else
+/* Five-state machine (tp->twistie 1..5) that tunes the 8139's "twister"
+ * analog front end for cable length, driven from the periodic timer.
+ * The register values are RealTek magic; each state may shorten
+ * next_tick to re-enter quickly. */
+static void rtl8139_tune_twister (struct net_device *dev,
+                  struct rtl8139_private *tp)
+{
+    int linkcase;
+    void *ioaddr = tp->mmio_addr;
+
+    /* This is a complicated state machine to configure the "twister" for
+       impedance/echos based on the cable length.
+       All of this is magic and undocumented.
+     */
+    switch (tp->twistie) {
+    case 1:
+        if (RTL_R16 (CSCR) & CSCR_LinkOKBit) {
+            /* We have link beat, let us tune the twister. */
+            RTL_W16 (CSCR, CSCR_LinkDownOffCmd);
+            tp->twistie = 2;    /* Change to state 2. */
+            next_tick = HZ / 10;
+        } else {
+            /* Just put in some reasonable defaults for when beat returns. */
+            RTL_W16 (CSCR, CSCR_LinkDownCmd);
+            RTL_W32 (FIFOTMS, 0x20);    /* Turn on cable test mode. */
+            RTL_W32 (PARA78, PARA78_default);
+            RTL_W32 (PARA7c, PARA7c_default);
+            tp->twistie = 0;    /* Bail from future actions. */
+        }
+        break;
+    case 2:
+        /* Read how long it took to hear the echo. */
+        linkcase = RTL_R16 (CSCR) & CSCR_LinkStatusBits;
+        if (linkcase == 0x7000)
+            tp->twist_row = 3;
+        else if (linkcase == 0x3000)
+            tp->twist_row = 2;
+        else if (linkcase == 0x1000)
+            tp->twist_row = 1;
+        else
+            tp->twist_row = 0;
+        tp->twist_col = 0;
+        tp->twistie = 3;    /* Change to state 2. */
+        next_tick = HZ / 10;
+        break;
+    case 3:
+        /* Put out four tuning parameters, one per 100msec. */
+        if (tp->twist_col == 0)
+            RTL_W16 (FIFOTMS, 0);
+        RTL_W32 (PARA7c, param[(int) tp->twist_row]
+             [(int) tp->twist_col]);
+        next_tick = HZ / 10;
+        if (++tp->twist_col >= 4) {
+            /* For short cables we are done.
+               For long cables (row == 3) check for mistune. */
+            tp->twistie =
+                (tp->twist_row == 3) ? 4 : 0;
+        }
+        break;
+    case 4:
+        /* Special case for long cables: check for mistune. */
+        if ((RTL_R16 (CSCR) &
+             CSCR_LinkStatusBits) == 0x7000) {
+            tp->twistie = 0;
+            break;
+        } else {
+            RTL_W32 (PARA7c, 0xfb38de03);
+            tp->twistie = 5;
+            next_tick = HZ / 10;
+        }
+        break;
+    case 5:
+        /* Retune for shorter cable (column 2). */
+        RTL_W32 (FIFOTMS, 0x20);
+        RTL_W32 (PARA78, PARA78_default);
+        RTL_W32 (PARA7c, PARA7c_default);
+        RTL_W32 (FIFOTMS, 0x00);
+        tp->twist_row = 2;
+        tp->twist_col = 0;
+        tp->twistie = 3;
+        next_tick = HZ / 10;
+        break;
+
+    default:
+        /* do nothing */
+        break;
+    }
+}
+#endif /* CONFIG_8139TOO_TUNE_TWISTER */
+
+
+/* One periodic maintenance pass (called from rtl8139_timer): re-check
+ * the negotiated duplex against the link partner's ability and run the
+ * twister tuning state machine.  Sets the global 'next_tick' interval. */
+static inline void rtl8139_thread_iter (struct net_device *dev,
+                 struct rtl8139_private *tp,
+                 void *ioaddr)
+{
+    int mii_lpa;
+
+    mii_lpa = mdio_read (dev, tp->phys[0], MII_LPA);
+
+    /* Only adapt duplex when not forced and the PHY answered. */
+    if (!tp->mii.force_media && mii_lpa != 0xffff) {
+        int duplex = (mii_lpa & LPA_100FULL)
+            || (mii_lpa & 0x01C0) == 0x0040;
+        if (tp->mii.full_duplex != duplex) {
+            tp->mii.full_duplex = duplex;
+
+            if (mii_lpa) {
+                printk (KERN_INFO
+                    "%s: Setting %s-duplex based on MII #%d link"
+                    " partner ability of %4.4x.\n",
+                    dev->name,
+                    tp->mii.full_duplex ? "full" : "half",
+                    tp->phys[0], mii_lpa);
+            } else {
+                printk(KERN_INFO"%s: media is unconnected, link down, or incompatible connection\n",
+                       dev->name);
+            }
+#if 0
+            RTL_W8 (Cfg9346, Cfg9346_Unlock);
+            RTL_W8 (Config1, tp->mii.full_duplex ? 0x60 : 0x20);
+            RTL_W8 (Cfg9346, Cfg9346_Lock);
+#endif
+        }
+    }
+
+    /* Default interval; rtl8139_tune_twister may shorten it. */
+    next_tick = HZ * 60;
+
+    rtl8139_tune_twister (dev, tp);
+
+    DPRINTK ("%s: Media selection tick, Link partner %4.4x.\n",
+         dev->name, RTL_R16 (NWayLPAR));
+    DPRINTK ("%s: Other registers are IntMask %4.4x IntStatus %4.4x\n",
+         dev->name, RTL_R16 (IntrMask), RTL_R16 (IntrStatus));
+    DPRINTK ("%s: Chip config %2.2x %2.2x.\n",
+         dev->name, RTL_R8 (Config0),
+         RTL_R8 (Config1));
+}
+
+
+#if 0
+/* Original Linux maintenance kernel thread (disabled in the Xen port in
+ * favour of the timer below).  NOTE: the "&current" references here had
+ * been corrupted by an HTML-entity mis-decoding into "(currency)t";
+ * restored so the branch compiles if ever re-enabled. */
+static int rtl8139_thread (void *data)
+{
+    struct net_device *dev = data;
+    struct rtl8139_private *tp = dev->priv;
+    unsigned long timeout;
+
+    daemonize ();
+    reparent_to_init();
+    spin_lock_irq(&current->sigmask_lock);
+    sigemptyset(&current->blocked);
+    recalc_sigpending(current);
+    spin_unlock_irq(&current->sigmask_lock);
+
+    strncpy (current->comm, dev->name, sizeof(current->comm) - 1);
+    current->comm[sizeof(current->comm) - 1] = '\0';
+
+    while (1) {
+        timeout = next_tick;
+        do {
+            timeout = interruptible_sleep_on_timeout (&tp->thr_wait, timeout);
+        } while (!signal_pending (current) && (timeout > 0));
+
+        if (signal_pending (current)) {
+            spin_lock_irq(&current->sigmask_lock);
+            flush_signals(current);
+            spin_unlock_irq(&current->sigmask_lock);
+        }
+
+        if (tp->time_to_die)
+            break;
+
+        rtnl_lock ();
+        rtl8139_thread_iter (dev, tp, tp->mmio_addr);
+        rtnl_unlock ();
+    }
+
+    complete_and_exit (&tp->thr_exited, 0);
+}
+#else
+/* Xen port: periodic timer callback replacing the kernel thread.  Runs
+ * one maintenance pass, then re-arms itself 'next_tick' jiffies ahead. */
+static void rtl8139_timer(unsigned long arg)
+{
+    struct net_device *dev = (struct net_device *)arg;
+    struct rtl8139_private *tp = dev->priv;
+    rtl8139_thread_iter (dev, tp, tp->mmio_addr);
+    mod_timer(&tp->timer, jiffies + next_tick);
+}
+#endif
+
+
+/* Discard all pending Tx work by rewinding both Tx cursors.  Caller must
+ * hold tp->lock (or otherwise exclude the interrupt handler). */
+static void rtl8139_tx_clear (struct rtl8139_private *tp)
+{
+    tp->cur_tx = 0;
+    tp->dirty_tx = 0;
+
+    /* XXX account for unsent Tx packets in tp->stats.tx_dropped */
+}
+
+
+/* Watchdog hook invoked when a Tx has not completed within
+ * dev->watchdog_timeo: dump diagnostic state, drop the stuck packets,
+ * and fully reinitialize the chip via rtl8139_hw_start(). */
+static void rtl8139_tx_timeout (struct net_device *dev)
+{
+    struct rtl8139_private *tp = dev->priv;
+    void *ioaddr = tp->mmio_addr;
+    int i;
+    u8 tmp8;
+    unsigned long flags;
+
+    DPRINTK ("%s: Transmit timeout, status %2.2x %4.4x "
+         "media %2.2x.\n", dev->name,
+         RTL_R8 (ChipCmd),
+         RTL_R16 (IntrStatus),
+         RTL_R8 (MediaStatus));
+
+    tp->xstats.tx_timeouts++;
+
+    /* disable Tx ASAP, if not already */
+    tmp8 = RTL_R8 (ChipCmd);
+    if (tmp8 & CmdTxEnb)
+        RTL_W8 (ChipCmd, CmdRxEnb);
+
+    /* Disable interrupts by clearing the interrupt mask. */
+    RTL_W16 (IntrMask, 0x0000);
+
+    /* Emit info to figure out what went wrong. */
+    printk (KERN_DEBUG "%s: Tx queue start entry %ld  dirty entry %ld.\n",
+        dev->name, tp->cur_tx, tp->dirty_tx);
+    for (i = 0; i < NUM_TX_DESC; i++)
+        printk (KERN_DEBUG "%s:  Tx descriptor %d is %8.8lx.%s\n",
+            dev->name, i, RTL_R32 (TxStatus0 + (i * 4)),
+            i == tp->dirty_tx % NUM_TX_DESC ?
+                " (queue head)" : "");
+
+    /* Stop a shared interrupt from scavenging while we are. */
+    spin_lock_irqsave (&tp->lock, flags);
+    rtl8139_tx_clear (tp);
+    spin_unlock_irqrestore (&tp->lock, flags);
+
+    /* ...and finally, reset everything */
+    rtl8139_hw_start (dev);
+
+    netif_wake_queue (dev);
+}
+
+
+/* hard_start_xmit hook: copy the skb into the next static Tx slot and
+ * kick the chip by writing the slot's TxStatus register.  Oversized
+ * packets (>= TX_BUF_SIZE) are silently dropped.  Always returns 0
+ * (the packet is always consumed). */
+static int rtl8139_start_xmit (struct sk_buff *skb, struct net_device *dev)
+{
+    struct rtl8139_private *tp = dev->priv;
+    void *ioaddr = tp->mmio_addr;
+    unsigned int entry;
+    unsigned int len = skb->len;
+
+    /* Calculate the next Tx descriptor entry. */
+    entry = tp->cur_tx % NUM_TX_DESC;
+
+    if (likely(len < TX_BUF_SIZE)) {
+        /* Pre-zero the slot so short frames go out padded. */
+        if(len < ETH_ZLEN)
+            memset(tp->tx_buf[entry], 0, ETH_ZLEN);
+#if 0
+        skb_copy_and_csum_dev(skb, tp->tx_buf[entry]);
+#else
+        /* Xen port: plain copy instead of the csum-and-copy path. */
+        skb_copy_bits(skb, 0, tp->tx_buf[entry], skb->len);
+#endif
+        dev_kfree_skb(skb);
+    } else {
+        dev_kfree_skb(skb);
+        tp->stats.tx_dropped++;
+        return 0;
+    }
+
+    /* Note: the chip doesn't have auto-pad! */
+    spin_lock_irq(&tp->lock);
+    /* Writing TxStatus hands the slot to the hardware; enforce the
+     * minimum Ethernet frame length here. */
+    RTL_W32_F (TxStatus0 + (entry * sizeof (u32)),
+           tp->tx_flag | max(len, (unsigned int)ETH_ZLEN));
+
+    dev->trans_start = jiffies;
+
+    tp->cur_tx++;
+    wmb();
+
+    /* Stop the queue when all NUM_TX_DESC slots are in flight. */
+    if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx)
+        netif_stop_queue (dev);
+    spin_unlock_irq(&tp->lock);
+
+    DPRINTK ("%s: Queued Tx packet size %u to slot %d.\n",
+         dev->name, len, entry);
+
+    return 0;
+}
+
+
+/* Reap completed Tx slots between dirty_tx and cur_tx: account errors,
+ * collisions and byte/packet counters per slot, then wake the queue if
+ * it was stopped and we freed at least one slot.  Runs under tp->lock
+ * from the interrupt handler. */
+static void rtl8139_tx_interrupt (struct net_device *dev,
+                  struct rtl8139_private *tp,
+                  void *ioaddr)
+{
+    unsigned long dirty_tx, tx_left;
+
+    assert (dev != NULL);
+    assert (tp != NULL);
+    assert (ioaddr != NULL);
+
+    dirty_tx = tp->dirty_tx;
+    tx_left = tp->cur_tx - dirty_tx;
+    while (tx_left > 0) {
+        int entry = dirty_tx % NUM_TX_DESC;
+        int txstatus;
+
+        txstatus = RTL_R32 (TxStatus0 + (entry * sizeof (u32)));
+
+        if (!(txstatus & (TxStatOK | TxUnderrun | TxAborted)))
+            break;    /* It still hasn't been Txed */
+
+        /* Note: TxCarrierLost is always asserted at 100mbps. */
+        if (txstatus & (TxOutOfWindow | TxAborted)) {
+            /* There was an major error, log it. */
+            DPRINTK ("%s: Transmit error, Tx status %8.8x.\n",
+                 dev->name, txstatus);
+            tp->stats.tx_errors++;
+            if (txstatus & TxAborted) {
+                tp->stats.tx_aborted_errors++;
+                /* Clear the abort so transmission resumes. */
+                RTL_W32 (TxConfig, TxClearAbt);
+                RTL_W16 (IntrStatus, TxErr);
+                wmb();
+            }
+            if (txstatus & TxCarrierLost)
+                tp->stats.tx_carrier_errors++;
+            if (txstatus & TxOutOfWindow)
+                tp->stats.tx_window_errors++;
+        } else {
+            if (txstatus & TxUnderrun) {
+                /* Add 64 to the Tx FIFO threshold. */
+                if (tp->tx_flag < 0x00300000)
+                    tp->tx_flag += 0x00020000;
+                tp->stats.tx_fifo_errors++;
+            }
+            tp->stats.collisions += (txstatus >> 24) & 15;
+            tp->stats.tx_bytes += txstatus & 0x7ff;
+            tp->stats.tx_packets++;
+        }
+
+        dirty_tx++;
+        tx_left--;
+    }
+
+#ifndef RTL8139_NDEBUG
+    if (tp->cur_tx - dirty_tx > NUM_TX_DESC) {
+        printk (KERN_ERR "%s: Out-of-sync dirty pointer, %ld vs. %ld.\n",
+                dev->name, dirty_tx, tp->cur_tx);
+        dirty_tx += NUM_TX_DESC;
+    }
+#endif /* RTL8139_NDEBUG */
+
+    /* only wake the queue if we did work, and the queue is stopped */
+    if (tp->dirty_tx != dirty_tx) {
+        tp->dirty_tx = dirty_tx;
+        mb();
+        if (netif_queue_stopped (dev))
+            netif_wake_queue (dev);
+    }
+}
+
+
+/* TODO: clean this up!  Rx reset need not be this intensive */
+/* Recover from an Rx error or a corrupted ring position: bump the error
+ * counters, then reset the receiver.  The default path just toggles
+ * CmdRxEnb; CONFIG_8139_OLD_RX_RESET does the full RealTek-recommended
+ * stop/restart/reprogram sequence. */
+static void rtl8139_rx_err (u32 rx_status, struct net_device *dev,
+                struct rtl8139_private *tp, void *ioaddr)
+{
+    u8 tmp8;
+#ifdef CONFIG_8139_OLD_RX_RESET
+    int tmp_work;
+#endif
+
+    DPRINTK ("%s: Ethernet frame had errors, status %8.8x.\n",
+             dev->name, rx_status);
+    tp->stats.rx_errors++;
+    if (!(rx_status & RxStatusOK)) {
+        if (rx_status & RxTooLong) {
+            DPRINTK ("%s: Oversized Ethernet frame, status %4.4x!\n",
+                 dev->name, rx_status);
+            /* A.C.: The chip hangs here. */
+        }
+        if (rx_status & (RxBadSymbol | RxBadAlign))
+            tp->stats.rx_frame_errors++;
+        if (rx_status & (RxRunt | RxTooLong))
+            tp->stats.rx_length_errors++;
+        if (rx_status & RxCRCErr)
+            tp->stats.rx_crc_errors++;
+    } else {
+        /* Status OK but size/position implausible: we lost sync
+         * with the ring. */
+        tp->xstats.rx_lost_in_ring++;
+    }
+
+#ifndef CONFIG_8139_OLD_RX_RESET
+    /* Quick reset: bounce the Rx enable bit and rewind the ring. */
+    tmp8 = RTL_R8 (ChipCmd);
+    RTL_W8 (ChipCmd, tmp8 & ~CmdRxEnb);
+    RTL_W8 (ChipCmd, tmp8);
+    RTL_W32 (RxConfig, tp->rx_config);
+    tp->cur_rx = 0;
+#else
+    /* Reset the receiver, based on RealTek recommendation. (Bug?) */
+
+    /* disable receive */
+    RTL_W8_F (ChipCmd, CmdTxEnb);
+    tmp_work = 200;
+    while (--tmp_work > 0) {
+        udelay(1);
+        tmp8 = RTL_R8 (ChipCmd);
+        if (!(tmp8 & CmdRxEnb))
+            break;
+    }
+    if (tmp_work <= 0)
+        printk (KERN_WARNING PFX "rx stop wait too long\n");
+    /* restart receive */
+    tmp_work = 200;
+    while (--tmp_work > 0) {
+        RTL_W8_F (ChipCmd, CmdRxEnb | CmdTxEnb);
+        udelay(1);
+        tmp8 = RTL_R8 (ChipCmd);
+        if ((tmp8 & CmdRxEnb) && (tmp8 & CmdTxEnb))
+            break;
+    }
+    if (tmp_work <= 0)
+        printk (KERN_WARNING PFX "tx/rx enable wait too long\n");
+
+    /* and reinitialize all rx related registers */
+    RTL_W8_F (Cfg9346, Cfg9346_Unlock);
+    /* Must enable Tx/Rx before setting transfer thresholds! */
+    RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);
+
+    tp->rx_config = rtl8139_rx_config | AcceptBroadcast | AcceptMyPhys;
+    RTL_W32 (RxConfig, tp->rx_config);
+    tp->cur_rx = 0;
+
+    DPRINTK("init buffer addresses\n");
+
+    /* Lock Config[01234] and BMCR register writes */
+    RTL_W8 (Cfg9346, Cfg9346_Lock);
+
+    /* init Rx ring buffer DMA address */
+    RTL_W32_F (RxBuf, tp->rx_ring_dma);
+
+    /* A.C.: Reset the multicast list. */
+    __set_rx_mode (dev);
+#endif
+}
+
+/* Drain received frames from the single contiguous Rx DMA ring: for
+ * each frame read its 32-bit status+size header, copy the payload into
+ * a fresh skb and hand it to the stack, then advance the hardware read
+ * pointer (RxBufPtr).  Aborts and resets via rtl8139_rx_err() on any
+ * error or implausible size.  Runs under tp->lock from the IRQ handler. */
+static void rtl8139_rx_interrupt (struct net_device *dev,
+                  struct rtl8139_private *tp, void *ioaddr)
+{
+    unsigned char *rx_ring;
+    u16 cur_rx;
+
+    assert (dev != NULL);
+    assert (tp != NULL);
+    assert (ioaddr != NULL);
+
+    rx_ring = tp->rx_ring;
+    cur_rx = tp->cur_rx;
+
+    DPRINTK ("%s: In rtl8139_rx(), current %4.4x BufAddr %4.4x,"
+         " free to %4.4x, Cmd %2.2x.\n", dev->name, cur_rx,
+         RTL_R16 (RxBufAddr),
+         RTL_R16 (RxBufPtr), RTL_R8 (ChipCmd));
+
+    while ((RTL_R8 (ChipCmd) & RxBufEmpty) == 0) {
+        int ring_offset = cur_rx % RX_BUF_LEN;
+        u32 rx_status;
+        unsigned int rx_size;
+        unsigned int pkt_size;
+        struct sk_buff *skb;
+
+        rmb();    /* header must be read after the empty check */
+
+        /* read size+status of next frame from DMA ring buffer */
+        rx_status = le32_to_cpu (*(u32 *) (rx_ring + ring_offset));
+        rx_size = rx_status >> 16;
+        pkt_size = rx_size - 4;    /* strip the trailing CRC */
+
+        DPRINTK ("%s:  rtl8139_rx() status %4.4x, size %4.4x,"
+             " cur %4.4x.\n", dev->name, rx_status,
+             rx_size, cur_rx);
+#if RTL8139_DEBUG > 2
+        {
+            int i;
+            DPRINTK ("%s: Frame contents ", dev->name);
+            for (i = 0; i < 70; i++)
+                printk (" %2.2x",
+                    rx_ring[ring_offset + i]);
+            printk (".\n");
+        }
+#endif
+
+        /* Packet copy from FIFO still in progress.
+         * Theoretically, this should never happen
+         * since EarlyRx is disabled.
+         */
+        if (rx_size == 0xfff0) {
+            tp->xstats.early_rx++;
+            break;
+        }
+
+        /* If Rx err or invalid rx_size/rx_status received
+         * (which happens if we get lost in the ring),
+         * Rx process gets reset, so we abort any further
+         * Rx processing.
+         */
+        if ((rx_size > (MAX_ETH_FRAME_SIZE+4)) ||
+            (rx_size < 8) ||
+            (!(rx_status & RxStatusOK))) {
+            rtl8139_rx_err (rx_status, dev, tp, ioaddr);
+            return;
+        }
+
+        /* Malloc up new buffer, compatible with net-2e. */
+        /* Omit the four octet CRC from the length. */
+
+        /* TODO: consider allocating skb's outside of
+         * interrupt context, both to speed interrupt processing,
+         * and also to reduce the chances of having to
+         * drop packets here under memory pressure.
+         */
+
+        skb = dev_alloc_skb (pkt_size + 2);
+        if (skb) {
+            skb->dev = dev;
+            skb_reserve (skb, 2);    /* 16 byte align the IP fields. */
+
+            /* Payload starts 4 bytes past the status header. */
+            eth_copy_and_sum (skb, &rx_ring[ring_offset + 4], pkt_size, 0);
+            skb_put (skb, pkt_size);
+
+            skb->protocol = eth_type_trans (skb, dev);
+            netif_rx (skb);
+            dev->last_rx = jiffies;
+            tp->stats.rx_bytes += pkt_size;
+            tp->stats.rx_packets++;
+        } else {
+            printk (KERN_WARNING
+                "%s: Memory squeeze, dropping packet.\n",
+                dev->name);
+            tp->stats.rx_dropped++;
+        }
+
+        /* Advance past header+frame, rounded up to a dword; the
+         * chip wants RxBufPtr biased by -16. */
+        cur_rx = (cur_rx + rx_size + 4 + 3) & ~3;
+        RTL_W16 (RxBufPtr, cur_rx - 16);
+
+        if (RTL_R16 (IntrStatus) & RxAckBits)
+            RTL_W16_F (IntrStatus, RxAckBits);
+    }
+
+    DPRINTK ("%s: Done rtl8139_rx(), current %4.4x BufAddr %4.4x,"
+         " free to %4.4x, Cmd %2.2x.\n", dev->name, cur_rx,
+         RTL_R16 (RxBufAddr),
+         RTL_R16 (RxBufPtr), RTL_R8 (ChipCmd));
+
+    tp->cur_rx = cur_rx;
+}
+
+
+/* Handle the uncommon interrupt causes: link change (reported as
+ * RxUnderrun on newer chips), Rx error/overflow accounting, and PCI bus
+ * errors (acknowledged by writing PCI_STATUS back to itself). */
+static void rtl8139_weird_interrupt (struct net_device *dev,
+                     struct rtl8139_private *tp,
+                     void *ioaddr,
+                     int status, int link_changed)
+{
+    DPRINTK ("%s: Abnormal interrupt, status %8.8x.\n",
+         dev->name, status);
+
+    assert (dev != NULL);
+    assert (tp != NULL);
+    assert (ioaddr != NULL);
+
+    /* Update the error count. */
+    tp->stats.rx_missed_errors += RTL_R32 (RxMissed);
+    RTL_W32 (RxMissed, 0);
+
+    if ((status & RxUnderrun) && link_changed &&
+        (tp->drv_flags & HAS_LNK_CHNG)) {
+        /* Really link-change on new chips. */
+        int lpar = RTL_R16 (NWayLPAR);
+        int duplex = (lpar & LPA_100FULL) || (lpar & 0x01C0) == 0x0040
+            || tp->mii.force_media;
+        if (tp->mii.full_duplex != duplex) {
+            tp->mii.full_duplex = duplex;
+#if 0
+            RTL_W8 (Cfg9346, Cfg9346_Unlock);
+            RTL_W8 (Config1, tp->mii.full_duplex ? 0x60 : 0x20);
+            RTL_W8 (Cfg9346, Cfg9346_Lock);
+#endif
+        }
+        /* Consumed as a link change; don't also count it below. */
+        status &= ~RxUnderrun;
+    }
+
+    /* XXX along with rtl8139_rx_err, are we double-counting errors? */
+    if (status &
+        (RxUnderrun | RxOverflow | RxErr | RxFIFOOver))
+        tp->stats.rx_errors++;
+
+    if (status & PCSTimeout)
+        tp->stats.rx_length_errors++;
+    if (status & (RxUnderrun | RxFIFOOver))
+        tp->stats.rx_fifo_errors++;
+    if (status & PCIErr) {
+        u16 pci_cmd_status;
+        /* Write-one-to-clear the latched PCI error bits. */
+        pci_read_config_word (tp->pci_dev, PCI_STATUS, &pci_cmd_status);
+        pci_write_config_word (tp->pci_dev, PCI_STATUS, pci_cmd_status);
+
+        printk (KERN_ERR "%s: PCI Bus error %4.4x.\n",
+            dev->name, pci_cmd_status);
+    }
+}
+
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+   after the Tx thread. */
+/* Main ISR: loop up to max_interrupt_work times, each pass reading and
+ * acknowledging IntrStatus and dispatching to the Rx, Tx and "weird"
+ * sub-handlers.  All work happens under tp->lock. */
+static void rtl8139_interrupt (int irq, void *dev_instance,
+                   struct pt_regs *regs)
+{
+    struct net_device *dev = (struct net_device *) dev_instance;
+    struct rtl8139_private *tp = dev->priv;
+    int boguscnt = max_interrupt_work;
+    void *ioaddr = tp->mmio_addr;
+    int ackstat, status;
+    int link_changed = 0; /* avoid bogus "uninit" warning */
+
+    spin_lock (&tp->lock);
+
+    do {
+        status = RTL_R16 (IntrStatus);
+
+        /* h/w no longer present (hotplug?) or major error, bail */
+        if (status == 0xFFFF)
+            break;
+
+        if ((status &
+             (PCIErr | PCSTimeout | RxUnderrun | RxOverflow |
+              RxFIFOOver | TxErr | TxOK | RxErr | RxOK)) == 0)
+            break;
+
+        /* Acknowledge all of the current interrupt sources ASAP, but
+           an first get an additional status bit from CSCR. */
+        if (status & RxUnderrun)
+            link_changed = RTL_R16 (CSCR) & CSCR_LinkChangeBit;
+
+        /* The chip takes special action when we clear RxAckBits,
+         * so we clear them later in rtl8139_rx_interrupt
+         */
+        ackstat = status & ~(RxAckBits | TxErr);
+        RTL_W16 (IntrStatus, ackstat);
+
+        DPRINTK ("%s: interrupt  status=%#4.4x ackstat=%#4.4x new intstat=%#4.4x.\n",
+             dev->name, ackstat, status, RTL_R16 (IntrStatus));
+
+        if (netif_running (dev) && (status & RxAckBits))
+            rtl8139_rx_interrupt (dev, tp, ioaddr);
+
+        /* Check uncommon events with one test. */
+        if (status & (PCIErr | PCSTimeout | RxUnderrun | RxOverflow |
+                  RxFIFOOver | RxErr))
+            rtl8139_weird_interrupt (dev, tp, ioaddr,
+                         status, link_changed);
+
+        if (netif_running (dev) && (status & (TxOK | TxErr))) {
+            rtl8139_tx_interrupt (dev, tp, ioaddr);
+            /* TxErr was deliberately left unacked above; ack it
+             * only after the Tx reaper has seen it. */
+            if (status & TxErr)
+                RTL_W16 (IntrStatus, TxErr);
+        }
+
+        boguscnt--;
+    } while (boguscnt > 0);
+
+    if (boguscnt <= 0) {
+        printk (KERN_WARNING "%s: Too much work at interrupt, "
+            "IntrStatus=0x%4.4x.\n", dev->name, status);
+
+        /* Clear all interrupt sources. */
+        RTL_W16 (IntrStatus, 0xffff);
+    }
+
+    spin_unlock (&tp->lock);
+
+    DPRINTK ("%s: exiting interrupt, intr_status=%#4.4x.\n",
+         dev->name, RTL_R16 (IntrStatus));
+}
+
+
+/* net_device stop hook: stop the queue and maintenance timer, quiesce
+ * the chip (DMA off, interrupts masked), release the IRQ and the
+ * DMA-coherent buffers, and drop the chip into low-power mode. */
+static int rtl8139_close (struct net_device *dev)
+{
+    struct rtl8139_private *tp = dev->priv;
+    void *ioaddr = tp->mmio_addr;
+#if 0
+    int ret = 0;
+#endif
+    unsigned long flags;
+
+    netif_stop_queue (dev);
+
+#if 0
+    if (tp->thr_pid >= 0) {
+        tp->time_to_die = 1;
+        wmb();
+        ret = kill_proc (tp->thr_pid, SIGTERM, 1);
+        if (ret) {
+            printk (KERN_ERR "%s: unable to signal thread\n", dev->name);
+            return ret;
+        }
+        wait_for_completion (&tp->thr_exited);
+    }
+#else
+    /* Xen port: timer replaces the kernel thread; wait it out. */
+    del_timer_sync(&tp->timer);
+#endif
+
+    DPRINTK ("%s: Shutting down ethercard, status was 0x%4.4x.\n",
+            dev->name, RTL_R16 (IntrStatus));
+
+    spin_lock_irqsave (&tp->lock, flags);
+
+    /* Stop the chip's Tx and Rx DMA processes. */
+    RTL_W8 (ChipCmd, 0);
+
+    /* Disable interrupts by clearing the interrupt mask. */
+    RTL_W16 (IntrMask, 0);
+
+    /* Update the error counts. */
+    tp->stats.rx_missed_errors += RTL_R32 (RxMissed);
+    RTL_W32 (RxMissed, 0);
+
+    spin_unlock_irqrestore (&tp->lock, flags);
+
+    /* Make sure no handler is still running before freeing the IRQ. */
+    synchronize_irq ();
+    free_irq (dev->irq, dev);
+
+    rtl8139_tx_clear (tp);
+
+    pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN,
+                tp->rx_ring, tp->rx_ring_dma);
+    pci_free_consistent(tp->pci_dev, TX_BUF_TOT_LEN,
+                tp->tx_bufs, tp->tx_bufs_dma);
+    tp->rx_ring = NULL;
+    tp->tx_bufs = NULL;
+
+    /* Green! Put the chip in low-power mode. */
+    RTL_W8 (Cfg9346, Cfg9346_Unlock);
+
+    if (rtl_chip_info[tp->chipset].flags & HasHltClk)
+        RTL_W8 (HltClk, 'H');    /* 'R' would leave the clock running. */
+
+    return 0;
+}
+
+
+/* Get the ethtool Wake-on-LAN settings.  Assumes that wol points to
+   kernel memory, *wol has been initialized as {ETHTOOL_GWOL}, and
+   other threads or interrupts aren't messing with the 8139.  */
+/* Translates the chip's Config3/Config5 wake bits into WAKE_* flags;
+ * chips without the LWake capability report no WoL support at all. */
+static void netdev_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+    struct rtl8139_private *np = dev->priv;
+    void *ioaddr = np->mmio_addr;
+
+    if (rtl_chip_info[np->chipset].flags & HasLWake) {
+        u8 cfg3 = RTL_R8 (Config3);
+        u8 cfg5 = RTL_R8 (Config5);
+
+        wol->supported = WAKE_PHY | WAKE_MAGIC
+            | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
+
+        wol->wolopts = 0;
+        if (cfg3 & Cfg3_LinkUp)
+            wol->wolopts |= WAKE_PHY;
+        if (cfg3 & Cfg3_Magic)
+            wol->wolopts |= WAKE_MAGIC;
+        /* (KON)FIXME: See how netdev_set_wol() handles the
+           following constants.  */
+        if (cfg5 & Cfg5_UWF)
+            wol->wolopts |= WAKE_UCAST;
+        if (cfg5 & Cfg5_MWF)
+            wol->wolopts |= WAKE_MCAST;
+        if (cfg5 & Cfg5_BWF)
+            wol->wolopts |= WAKE_BCAST;
+    }
+}
+
+
+/* Set the ethtool Wake-on-LAN settings. Return 0 or -errno. Assumes
+ that wol points to kernel memory and other threads or interrupts
+ aren't messing with the 8139. */
+static int netdev_set_wol (struct net_device *dev,
+ const struct ethtool_wolinfo *wol)
+{
+ struct rtl8139_private *np = dev->priv;
+ void *ioaddr = np->mmio_addr;
+ u32 support;
+ u8 cfg3, cfg5;
+
+ /* Only chips with the LWake capability support any wake options;
+ reject a request for anything the hardware cannot do. */
+ support = ((rtl_chip_info[np->chipset].flags & HasLWake)
+ ? (WAKE_PHY | WAKE_MAGIC
+ | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST)
+ : 0);
+ if (wol->wolopts & ~support)
+ return -EINVAL;
+
+ /* Config3 is write-protected: it must be bracketed by a Cfg9346
+ unlock/lock pair.  Preserve all bits except the two we own. */
+ cfg3 = RTL_R8 (Config3) & ~(Cfg3_LinkUp | Cfg3_Magic);
+ if (wol->wolopts & WAKE_PHY)
+ cfg3 |= Cfg3_LinkUp;
+ if (wol->wolopts & WAKE_MAGIC)
+ cfg3 |= Cfg3_Magic;
+ RTL_W8 (Cfg9346, Cfg9346_Unlock);
+ RTL_W8 (Config3, cfg3);
+ RTL_W8 (Cfg9346, Cfg9346_Lock);
+
+ cfg5 = RTL_R8 (Config5) & ~(Cfg5_UWF | Cfg5_MWF | Cfg5_BWF);
+ /* (KON)FIXME: These are untested. We may have to set the
+ CRC0, Wakeup0 and LSBCRC0 registers too, but I have no
+ documentation. */
+ if (wol->wolopts & WAKE_UCAST)
+ cfg5 |= Cfg5_UWF;
+ if (wol->wolopts & WAKE_MCAST)
+ cfg5 |= Cfg5_MWF;
+ if (wol->wolopts & WAKE_BCAST)
+ cfg5 |= Cfg5_BWF;
+ RTL_W8 (Config5, cfg5); /* need not unlock via Cfg9346 */
+
+ return 0;
+}
+
+#if 0
+/* ethtool ioctl dispatcher (currently compiled out with the rest of
+ * the ioctl path in this Xen port).
+ *
+ * Handles the ETHTOOL_* sub-commands: driver info, MII get/set,
+ * autonegotiation restart, link state, message level, Wake-on-LAN,
+ * register dump, string lists and NIC statistics.
+ * Returns 0 or a negative errno.
+ *
+ * Fix: three spans had been mangled by an HTML-entity round-trip
+ * ("&reg" and "&eth" turned into single mojibake characters) —
+ * restored to "&regs" in the ETHTOOL_GREGS copy_from_user /
+ * copy_to_user calls and "&ethtool_stats_keys" in ETHTOOL_GSTRINGS.
+ * As previously written this function could not compile.
+ */
+static int netdev_ethtool_ioctl (struct net_device *dev, void *useraddr)
+{
+ struct rtl8139_private *np = dev->priv;
+ u32 ethcmd;
+
+ /* dev_ioctl() in ../../net/core/dev.c has already checked
+ capable(CAP_NET_ADMIN), so don't bother with that here. */
+
+ if (get_user(ethcmd, (u32 *)useraddr))
+ return -EFAULT;
+
+ switch (ethcmd) {
+
+ case ETHTOOL_GDRVINFO: {
+ struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
+ strcpy (info.driver, DRV_NAME);
+ strcpy (info.version, DRV_VERSION);
+ strcpy (info.bus_info, np->pci_dev->slot_name);
+ info.regdump_len = np->regs_len;
+ if (copy_to_user (useraddr, &info, sizeof (info)))
+ return -EFAULT;
+ return 0;
+ }
+
+ /* get settings */
+ case ETHTOOL_GSET: {
+ struct ethtool_cmd ecmd = { ETHTOOL_GSET };
+ spin_lock_irq(&np->lock);
+ mii_ethtool_gset(&np->mii, &ecmd);
+ spin_unlock_irq(&np->lock);
+ if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
+ return -EFAULT;
+ return 0;
+ }
+ /* set settings */
+ case ETHTOOL_SSET: {
+ int r;
+ struct ethtool_cmd ecmd;
+ if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
+ return -EFAULT;
+ spin_lock_irq(&np->lock);
+ r = mii_ethtool_sset(&np->mii, &ecmd);
+ spin_unlock_irq(&np->lock);
+ return r;
+ }
+ /* restart autonegotiation */
+ case ETHTOOL_NWAY_RST: {
+ return mii_nway_restart(&np->mii);
+ }
+ /* get link status */
+ case ETHTOOL_GLINK: {
+ struct ethtool_value edata = {ETHTOOL_GLINK};
+ edata.data = mii_link_ok(&np->mii);
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+ }
+
+ /* get message-level */
+ case ETHTOOL_GMSGLVL: {
+ struct ethtool_value edata = {ETHTOOL_GMSGLVL};
+ edata.data = debug;
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+ }
+ /* set message-level */
+ case ETHTOOL_SMSGLVL: {
+ struct ethtool_value edata;
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+ debug = edata.data;
+ return 0;
+ }
+
+ case ETHTOOL_GWOL:
+ {
+ struct ethtool_wolinfo wol = { ETHTOOL_GWOL };
+ spin_lock_irq (&np->lock);
+ netdev_get_wol (dev, &wol);
+ spin_unlock_irq (&np->lock);
+ if (copy_to_user (useraddr, &wol, sizeof (wol)))
+ return -EFAULT;
+ return 0;
+ }
+
+ case ETHTOOL_SWOL:
+ {
+ struct ethtool_wolinfo wol;
+ int rc;
+ if (copy_from_user (&wol, useraddr, sizeof (wol)))
+ return -EFAULT;
+ spin_lock_irq (&np->lock);
+ rc = netdev_set_wol (dev, &wol);
+ spin_unlock_irq (&np->lock);
+ return rc;
+ }
+
+/* TODO: we are too slack to do reg dumping for pio, for now */
+#ifndef CONFIG_8139TOO_PIO
+ /* NIC register dump */
+ case ETHTOOL_GREGS: {
+ struct ethtool_regs regs;
+ unsigned int regs_len = np->regs_len;
+ u8 *regbuf = kmalloc(regs_len, GFP_KERNEL);
+ int rc;
+
+ if (!regbuf)
+ return -ENOMEM;
+ memset(regbuf, 0, regs_len);
+
+ rc = copy_from_user(&regs, useraddr, sizeof(regs));
+ if (rc) {
+ rc = -EFAULT;
+ goto err_out_gregs;
+ }
+
+ /* The caller must request exactly regs_len bytes: clamp a
+ larger request, reject a smaller one. */
+ if (regs.len > regs_len)
+ regs.len = regs_len;
+ if (regs.len < regs_len) {
+ rc = -EINVAL;
+ goto err_out_gregs;
+ }
+
+ regs.version = RTL_REGS_VER;
+ rc = copy_to_user(useraddr, &regs, sizeof(regs));
+ if (rc) {
+ rc = -EFAULT;
+ goto err_out_gregs;
+ }
+
+ useraddr += offsetof(struct ethtool_regs, data);
+
+ /* Snapshot the MMIO register window under the lock, then copy
+ the snapshot out without holding it. */
+ spin_lock_irq(&np->lock);
+ memcpy_fromio(regbuf, np->mmio_addr, regs_len);
+ spin_unlock_irq(&np->lock);
+
+ if (copy_to_user(useraddr, regbuf, regs_len))
+ rc = -EFAULT;
+
+err_out_gregs:
+ kfree(regbuf);
+ return rc;
+ }
+#endif /* CONFIG_8139TOO_PIO */
+
+ /* get string list(s) */
+ case ETHTOOL_GSTRINGS: {
+ struct ethtool_gstrings estr = { ETHTOOL_GSTRINGS };
+
+ if (copy_from_user(&estr, useraddr, sizeof(estr)))
+ return -EFAULT;
+ if (estr.string_set != ETH_SS_STATS)
+ return -EINVAL;
+
+ estr.len = RTL_NUM_STATS;
+ if (copy_to_user(useraddr, &estr, sizeof(estr)))
+ return -EFAULT;
+ if (copy_to_user(useraddr + sizeof(estr),
+ &ethtool_stats_keys,
+ sizeof(ethtool_stats_keys)))
+ return -EFAULT;
+ return 0;
+ }
+
+ /* get NIC-specific statistics */
+ case ETHTOOL_GSTATS: {
+ struct ethtool_stats estats = { ETHTOOL_GSTATS };
+ u64 *tmp_stats;
+ const unsigned int sz = sizeof(u64) * RTL_NUM_STATS;
+ int i;
+
+ estats.n_stats = RTL_NUM_STATS;
+ if (copy_to_user(useraddr, &estats, sizeof(estats)))
+ return -EFAULT;
+
+ tmp_stats = kmalloc(sz, GFP_KERNEL);
+ if (!tmp_stats)
+ return -ENOMEM;
+ memset(tmp_stats, 0, sz);
+
+ /* Order must match ethtool_stats_keys. */
+ i = 0;
+ tmp_stats[i++] = np->xstats.early_rx;
+ tmp_stats[i++] = np->xstats.tx_buf_mapped;
+ tmp_stats[i++] = np->xstats.tx_timeouts;
+ tmp_stats[i++] = np->xstats.rx_lost_in_ring;
+ if (i != RTL_NUM_STATS)
+ BUG();
+
+ i = copy_to_user(useraddr + sizeof(estats), tmp_stats, sz);
+ kfree(tmp_stats);
+
+ if (i)
+ return -EFAULT;
+ return 0;
+ }
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+
+/* Device ioctl entry point: SIOCETHTOOL requests are handed to
+ * netdev_ethtool_ioctl(); every other command is treated as an MII
+ * ioctl and executed under the driver lock.  Requires the interface
+ * to be up; returns 0 or a negative errno. */
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct rtl8139_private *tp = dev->priv;
+ struct mii_ioctl_data *mii_data = (struct mii_ioctl_data *) & rq->ifr_data;
+ int rc;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (cmd != SIOCETHTOOL) {
+ spin_lock_irq(&tp->lock);
+ rc = generic_mii_ioctl(&tp->mii, mii_data, cmd, NULL);
+ spin_unlock_irq(&tp->lock);
+ } else {
+ rc = netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
+ }
+
+ return rc;
+}
+#endif
+
+
+/* Return the accumulated software statistics.  When the interface is
+ * up, first fold the chip's RxMissed counter into rx_missed_errors
+ * and reset it by writing zero, all under the driver lock. */
+static struct net_device_stats *rtl8139_get_stats (struct net_device *dev)
+{
+ struct rtl8139_private *priv = dev->priv;
+ void *ioaddr = priv->mmio_addr;
+ unsigned long irq_flags;
+
+ if (!netif_running(dev))
+ return &priv->stats;
+
+ spin_lock_irqsave (&priv->lock, irq_flags);
+ priv->stats.rx_missed_errors += RTL_R32 (RxMissed);
+ RTL_W32 (RxMissed, 0);
+ spin_unlock_irqrestore (&priv->lock, irq_flags);
+
+ return &priv->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ This routine is not state sensitive and need not be SMP locked. */
+
+static void __set_rx_mode (struct net_device *dev)
+{
+ struct rtl8139_private *tp = dev->priv;
+ void *ioaddr = tp->mmio_addr;
+ u32 mc_filter[2]; /* Multicast hash filter */
+ int i, rx_mode;
+ u32 tmp;
+
+ DPRINTK ("%s: rtl8139_set_rx_mode(%4.4x) done -- Rx config %8.8lx.\n",
+ dev->name, dev->flags, RTL_R32 (RxConfig));
+
+ /* Note: do not reorder, GCC is clever about common statements. */
+ if (dev->flags & IFF_PROMISC) {
+ /* Unconditionally log net taps. */
+ printk (KERN_NOTICE "%s: Promiscuous mode enabled.\n",
+ dev->name);
+ rx_mode =
+ AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
+ AcceptAllPhys;
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ } else if ((dev->mc_count > multicast_filter_limit)
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter perfectly -- accept all multicasts. */
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ } else {
+ /* Build a 64-bit hash filter from the device's multicast
+ list: one bit per address, indexed by the top 6 bits of
+ the Ethernet CRC of that address. */
+ struct dev_mc_list *mclist;
+ rx_mode = AcceptBroadcast | AcceptMyPhys;
+ mc_filter[1] = mc_filter[0] = 0;
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ rx_mode |= AcceptMulticast;
+ }
+ }
+
+ /* We can safely update without stopping the chip. */
+ /* Only touch RxConfig when the accept mask actually changed. */
+ tmp = rtl8139_rx_config | rx_mode;
+ if (tp->rx_config != tmp) {
+ RTL_W32_F (RxConfig, tmp);
+ tp->rx_config = tmp;
+ }
+ /* Program the 64-bit hash filter (MAR0..MAR7). */
+ RTL_W32_F (MAR0 + 0, mc_filter[0]);
+ RTL_W32_F (MAR0 + 4, mc_filter[1]);
+}
+
+/* Locked wrapper around __set_rx_mode(): takes the driver spinlock
+ * with interrupts disabled while the Rx filter is reprogrammed. */
+static void rtl8139_set_rx_mode (struct net_device *dev)
+{
+ struct rtl8139_private *tp = dev->priv;
+ unsigned long irq_state;
+
+ spin_lock_irqsave (&tp->lock, irq_state);
+ __set_rx_mode (dev);
+ spin_unlock_irqrestore (&tp->lock, irq_state);
+}
+
+#ifdef CONFIG_PM
+
+/* Power-management suspend hook.
+ *
+ * If the interface is up: detach it from the network stack, then
+ * under the driver lock mask interrupts, stop the chip, and fold the
+ * final RxMissed count into the software stats.  Returns 0. */
+static int rtl8139_suspend (struct pci_dev *pdev, u32 state)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+ struct rtl8139_private *tp = dev->priv;
+ void *ioaddr = tp->mmio_addr;
+ unsigned long flags;
+
+ if (!netif_running (dev))
+ return 0;
+
+ netif_device_detach (dev);
+
+ spin_lock_irqsave (&tp->lock, flags);
+
+ /* Disable interrupts, stop Tx and Rx. */
+ RTL_W16 (IntrMask, 0);
+ RTL_W8 (ChipCmd, 0);
+
+ /* Update the error counts. */
+ tp->stats.rx_missed_errors += RTL_R32 (RxMissed);
+ RTL_W32 (RxMissed, 0);
+
+ spin_unlock_irqrestore (&tp->lock, flags);
+ return 0;
+}
+
+
+/* Power-management resume hook: if the interface was up, reattach it
+ * and reinitialize the chip from scratch via rtl8139_hw_start(). */
+static int rtl8139_resume (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+
+ if (netif_running (dev)) {
+ netif_device_attach (dev);
+ rtl8139_hw_start (dev);
+ }
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
+
+/* PCI glue: probe/remove entry points plus optional power-management
+ hooks.  Registered from rtl8139_init_module() below. */
+static struct pci_driver rtl8139_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = rtl8139_pci_tbl,
+ .probe = rtl8139_init_one,
+ .remove = __devexit_p(rtl8139_remove_one),
+#ifdef CONFIG_PM
+ .suspend = rtl8139_suspend,
+ .resume = rtl8139_resume,
+#endif /* CONFIG_PM */
+};
+
+
+/* Module entry point: announce the driver (modular builds only) and
+ register the PCI driver.  Returns pci_module_init()'s result. */
+static int __init rtl8139_init_module (void)
+{
+ /* when we're a module, we always print a version message,
+ * even if no 8139 board is found.
+ */
+#ifdef MODULE
+ printk (KERN_INFO RTL8139_DRIVER_NAME "\n");
+#endif
+
+ return pci_module_init (&rtl8139_pci_driver);
+}
+
+
+/* Module exit point: unregister the PCI driver, which in turn invokes
+ rtl8139_remove_one() for every bound device. */
+static void __exit rtl8139_cleanup_module (void)
+{
+ pci_unregister_driver (&rtl8139_pci_driver);
+}
+
+
+module_init(rtl8139_init_module);
+module_exit(rtl8139_cleanup_module);
include $(BASEDIR)/Rules.mk
default: $(OBJS)
- $(MAKE) -C ne
+ $(MAKE) -C tulip
+ $(MAKE) -C e100
$(MAKE) -C e1000
- $(LD) -r -o driver.o e1000/e1000.o $(OBJS) ne/ne_drv.o
+ $(LD) -r -o driver.o e100/e100.o e1000/e1000.o $(OBJS) tulip/tulip.o
clean:
- $(MAKE) -C ne clean
+ $(MAKE) -C tulip clean
+ $(MAKE) -C e100 clean
$(MAKE) -C e1000 clean
rm -f *.o *~ core
--- /dev/null
+
+Recommended (cards tested and working at full efficiency):
+ 3com 3c905 (3c59x.c Linux driver)
+ Broadcom tg3 (tg3.c Linux driver)
+ Intel e1000 (e1000 Linux driver)
+ Intel e100 (e100 Linux driver) [*]
+
+Drivers ported, but not necessarily tested; they will not work at full
+efficiency (extra copies are incurred within Xen):
+ ne2k, pcnet32/lance, rtl8139, tulip, via-rhine
+
+*: Note that certain revisions of e100 do not support
+ scatter/gather DMA. These cards will incur an extra
+ copy within Xen for every transmitted packet.
\ No newline at end of file
--- /dev/null
+
+"This software program is licensed subject to the GNU General Public License
+(GPL). Version 2, June 1991, available at
+<http://www.fsf.org/copyleft/gpl.html>"
+
+GNU General Public License
+
+Version 2, June 1991
+
+Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+
+Everyone is permitted to copy and distribute verbatim copies of this license
+document, but changing it is not allowed.
+
+Preamble
+
+The licenses for most software are designed to take away your freedom to
+share and change it. By contrast, the GNU General Public License is intended
+to guarantee your freedom to share and change free software--to make sure
+the software is free for all its users. This General Public License applies
+to most of the Free Software Foundation's software and to any other program
+whose authors commit to using it. (Some other Free Software Foundation
+software is covered by the GNU Library General Public License instead.) You
+can apply it to your programs, too.
+
+When we speak of free software, we are referring to freedom, not price. Our
+General Public Licenses are designed to make sure that you have the freedom
+to distribute copies of free software (and charge for this service if you
+wish), that you receive source code or can get it if you want it, that you
+can change the software or use pieces of it in new free programs; and that
+you know you can do these things.
+
+To protect your rights, we need to make restrictions that forbid anyone to
+deny you these rights or to ask you to surrender the rights. These
+restrictions translate to certain responsibilities for you if you distribute
+copies of the software, or if you modify it.
+
+For example, if you distribute copies of such a program, whether gratis or
+for a fee, you must give the recipients all the rights that you have. You
+must make sure that they, too, receive or can get the source code. And you
+must show them these terms so they know their rights.
+
+We protect your rights with two steps: (1) copyright the software, and (2)
+offer you this license which gives you legal permission to copy, distribute
+and/or modify the software.
+
+Also, for each author's protection and ours, we want to make certain that
+everyone understands that there is no warranty for this free software. If
+the software is modified by someone else and passed on, we want its
+recipients to know that what they have is not the original, so that any
+problems introduced by others will not reflect on the original authors'
+reputations.
+
+Finally, any free program is threatened constantly by software patents. We
+wish to avoid the danger that redistributors of a free program will
+individually obtain patent licenses, in effect making the program
+proprietary. To prevent this, we have made it clear that any patent must be
+licensed for everyone's free use or not licensed at all.
+
+The precise terms and conditions for copying, distribution and modification
+follow.
+
+TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+0. This License applies to any program or other work which contains a notice
+ placed by the copyright holder saying it may be distributed under the
+ terms of this General Public License. The "Program", below, refers to any
+ such program or work, and a "work based on the Program" means either the
+ Program or any derivative work under copyright law: that is to say, a
+ work containing the Program or a portion of it, either verbatim or with
+ modifications and/or translated into another language. (Hereinafter,
+ translation is included without limitation in the term "modification".)
+ Each licensee is addressed as "you".
+
+ Activities other than copying, distribution and modification are not
+ covered by this License; they are outside its scope. The act of running
+ the Program is not restricted, and the output from the Program is covered
+ only if its contents constitute a work based on the Program (independent
+ of having been made by running the Program). Whether that is true depends
+ on what the Program does.
+
+1. You may copy and distribute verbatim copies of the Program's source code
+ as you receive it, in any medium, provided that you conspicuously and
+ appropriately publish on each copy an appropriate copyright notice and
+ disclaimer of warranty; keep intact all the notices that refer to this
+ License and to the absence of any warranty; and give any other recipients
+ of the Program a copy of this License along with the Program.
+
+ You may charge a fee for the physical act of transferring a copy, and you
+ may at your option offer warranty protection in exchange for a fee.
+
+2. You may modify your copy or copies of the Program or any portion of it,
+ thus forming a work based on the Program, and copy and distribute such
+ modifications or work under the terms of Section 1 above, provided that
+ you also meet all of these conditions:
+
+ * a) You must cause the modified files to carry prominent notices stating
+ that you changed the files and the date of any change.
+
+ * b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any part
+ thereof, to be licensed as a whole at no charge to all third parties
+ under the terms of this License.
+
+ * c) If the modified program normally reads commands interactively when
+ run, you must cause it, when started running for such interactive
+ use in the most ordinary way, to print or display an announcement
+ including an appropriate copyright notice and a notice that there is
+ no warranty (or else, saying that you provide a warranty) and that
+ users may redistribute the program under these conditions, and
+ telling the user how to view a copy of this License. (Exception: if
+ the Program itself is interactive but does not normally print such
+ an announcement, your work based on the Program is not required to
+ print an announcement.)
+
+ These requirements apply to the modified work as a whole. If identifiable
+ sections of that work are not derived from the Program, and can be
+ reasonably considered independent and separate works in themselves, then
+ this License, and its terms, do not apply to those sections when you
+ distribute them as separate works. But when you distribute the same
+ sections as part of a whole which is a work based on the Program, the
+ distribution of the whole must be on the terms of this License, whose
+ permissions for other licensees extend to the entire whole, and thus to
+ each and every part regardless of who wrote it.
+
+ Thus, it is not the intent of this section to claim rights or contest
+ your rights to work written entirely by you; rather, the intent is to
+ exercise the right to control the distribution of derivative or
+ collective works based on the Program.
+
+ In addition, mere aggregation of another work not based on the Program
+ with the Program (or with a work based on the Program) on a volume of a
+ storage or distribution medium does not bring the other work under the
+ scope of this License.
+
+3. You may copy and distribute the Program (or a work based on it, under
+ Section 2) in object code or executable form under the terms of Sections
+ 1 and 2 above provided that you also do one of the following:
+
+ * a) Accompany it with the complete corresponding machine-readable source
+ code, which must be distributed under the terms of Sections 1 and 2
+ above on a medium customarily used for software interchange; or,
+
+ * b) Accompany it with a written offer, valid for at least three years,
+ to give any third party, for a charge no more than your cost of
+ physically performing source distribution, a complete machine-
+ readable copy of the corresponding source code, to be distributed
+ under the terms of Sections 1 and 2 above on a medium customarily
+ used for software interchange; or,
+
+ * c) Accompany it with the information you received as to the offer to
+ distribute corresponding source code. (This alternative is allowed
+ only for noncommercial distribution and only if you received the
+ program in object code or executable form with such an offer, in
+ accord with Subsection b above.)
+
+ The source code for a work means the preferred form of the work for
+ making modifications to it. For an executable work, complete source code
+ means all the source code for all modules it contains, plus any
+ associated interface definition files, plus the scripts used to control
+ compilation and installation of the executable. However, as a special
+ exception, the source code distributed need not include anything that is
+ normally distributed (in either source or binary form) with the major
+ components (compiler, kernel, and so on) of the operating system on which
+ the executable runs, unless that component itself accompanies the
+ executable.
+
+ If distribution of executable or object code is made by offering access
+ to copy from a designated place, then offering equivalent access to copy
+ the source code from the same place counts as distribution of the source
+ code, even though third parties are not compelled to copy the source
+ along with the object code.
+
+4. You may not copy, modify, sublicense, or distribute the Program except as
+ expressly provided under this License. Any attempt otherwise to copy,
+ modify, sublicense or distribute the Program is void, and will
+ automatically terminate your rights under this License. However, parties
+ who have received copies, or rights, from you under this License will not
+ have their licenses terminated so long as such parties remain in full
+ compliance.
+
+5. You are not required to accept this License, since you have not signed
+ it. However, nothing else grants you permission to modify or distribute
+ the Program or its derivative works. These actions are prohibited by law
+ if you do not accept this License. Therefore, by modifying or
+ distributing the Program (or any work based on the Program), you
+ indicate your acceptance of this License to do so, and all its terms and
+ conditions for copying, distributing or modifying the Program or works
+ based on it.
+
+6. Each time you redistribute the Program (or any work based on the
+ Program), the recipient automatically receives a license from the
+ original licensor to copy, distribute or modify the Program subject to
+ these terms and conditions. You may not impose any further restrictions
+ on the recipients' exercise of the rights granted herein. You are not
+ responsible for enforcing compliance by third parties to this License.
+
+7. If, as a consequence of a court judgment or allegation of patent
+ infringement or for any other reason (not limited to patent issues),
+ conditions are imposed on you (whether by court order, agreement or
+ otherwise) that contradict the conditions of this License, they do not
+ excuse you from the conditions of this License. If you cannot distribute
+ so as to satisfy simultaneously your obligations under this License and
+ any other pertinent obligations, then as a consequence you may not
+ distribute the Program at all. For example, if a patent license would
+ not permit royalty-free redistribution of the Program by all those who
+ receive copies directly or indirectly through you, then the only way you
+ could satisfy both it and this License would be to refrain entirely from
+ distribution of the Program.
+
+ If any portion of this section is held invalid or unenforceable under any
+ particular circumstance, the balance of the section is intended to apply
+ and the section as a whole is intended to apply in other circumstances.
+
+ It is not the purpose of this section to induce you to infringe any
+ patents or other property right claims or to contest validity of any
+ such claims; this section has the sole purpose of protecting the
+ integrity of the free software distribution system, which is implemented
+ by public license practices. Many people have made generous contributions
+ to the wide range of software distributed through that system in
+ reliance on consistent application of that system; it is up to the
+ author/donor to decide if he or she is willing to distribute software
+ through any other system and a licensee cannot impose that choice.
+
+ This section is intended to make thoroughly clear what is believed to be
+ a consequence of the rest of this License.
+
+8. If the distribution and/or use of the Program is restricted in certain
+ countries either by patents or by copyrighted interfaces, the original
+ copyright holder who places the Program under this License may add an
+ explicit geographical distribution limitation excluding those countries,
+ so that distribution is permitted only in or among countries not thus
+ excluded. In such case, this License incorporates the limitation as if
+ written in the body of this License.
+
+9. The Free Software Foundation may publish revised and/or new versions of
+ the General Public License from time to time. Such new versions will be
+ similar in spirit to the present version, but may differ in detail to
+ address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the Program
+ specifies a version number of this License which applies to it and "any
+ later version", you have the option of following the terms and
+ conditions either of that version or of any later version published by
+ the Free Software Foundation. If the Program does not specify a version
+ number of this License, you may choose any version ever published by the
+ Free Software Foundation.
+
+10. If you wish to incorporate parts of the Program into other free programs
+ whose distribution conditions are different, write to the author to ask
+ for permission. For software which is copyrighted by the Free Software
+ Foundation, write to the Free Software Foundation; we sometimes make
+ exceptions for this. Our decision will be guided by the two goals of
+ preserving the free status of all derivatives of our free software and
+ of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+ FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+ OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+ PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER
+ EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
+ ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH
+ YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL
+ NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+ REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR
+ DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL
+ DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM
+ (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED
+ INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF
+ THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR
+ OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+
+END OF TERMS AND CONDITIONS
+
+How to Apply These Terms to Your New Programs
+
+If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it free
+software which everyone can redistribute and change under these terms.
+
+To do so, attach the following notices to the program. It is safest to
+attach them to the start of each source file to most effectively convey the
+exclusion of warranty; and each file should have at least the "copyright"
+line and a pointer to where the full notice is found.
+
+one line to give the program's name and an idea of what it does.
+Copyright (C) yyyy name of author
+
+This program is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2 of the License, or (at your option)
+any later version.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc., 59
+Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this when
+it starts in an interactive mode:
+
+Gnomovision version 69, Copyright (C) year name of author Gnomovision comes
+with ABSOLUTELY NO WARRANTY; for details type 'show w'. This is free
+software, and you are welcome to redistribute it under certain conditions;
+type 'show c' for details.
+
+The hypothetical commands 'show w' and 'show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may be
+called something other than 'show w' and 'show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+'Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+signature of Ty Coon, 1 April 1989
+Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Library General Public
+License instead of this License.
--- /dev/null
+
+include $(BASEDIR)/Rules.mk
+
+# Partial-link ($(LD) -r) all e100 objects into a single e100.o, which
+# the parent net/ Makefile folds into driver.o.
+default: $(OBJS)
+ $(LD) -r -o e100.o $(OBJS)
+
+clean:
+ rm -f *.o *~ core
--- /dev/null
+/*******************************************************************************
+
+
+ Copyright(c) 1999 - 2003 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc., 59
+ Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ The full GNU General Public License is included in this distribution in the
+ file called LICENSE.
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+*******************************************************************************/
+
+#ifndef _E100_INC_
+#define _E100_INC_
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/version.h>
+#include <linux/string.h>
+/*#include <linux/wait.h>*/
+#include <linux/reboot.h>
+#include <asm/io.h>
+#include <asm/unaligned.h>
+#include <asm/processor.h>
+#include <linux/ethtool.h>
+/*#include <linux/inetdevice.h>*/
+#include <asm/bitops.h>
+
+#include <linux/if.h>
+#include <asm/uaccess.h>
+/*#include <linux/ip.h>*/
+#include <linux/if_vlan.h>
+#include <linux/mii.h>
+
+#define yield() ((void)0)
+
+#define E100_REGS_LEN 1
+/*
+ * Configure parameters for buffers per controller.
+ * If the machine this is being used on is a faster machine (i.e. > 150MHz)
+ * and running on a 10MBS network then more queueing of data occurs. This
+ * may indicate that some of the numbers below should be adjusted. Here are
+ * some typical numbers:
+ * MAX_TCB 64
+ * MAX_RFD 64
+ * The default numbers work well on most systems tested, so no real
+ * adjustments really need to take place. Also, if the machine is connected
+ * to a 100MBS network the numbers described above can be lowered from the
+ * defaults as considerably less data will be queued.
+ */
+
+#define TX_FRAME_CNT 8 /* consecutive transmit frames per interrupt */
+/* TX_FRAME_CNT must be less than MAX_TCB */
+
+/* TCB (transmit command block) ring sizing limits. */
+#define E100_DEFAULT_TCB 64
+/* Parenthesized so the macro expands safely inside larger expressions
+ * (e.g. N * E100_MIN_TCB); the bare form 2*TX_FRAME_CNT + 3 would
+ * mis-associate under surrounding operators. */
+#define E100_MIN_TCB (2*TX_FRAME_CNT + 3) /* make room for at least 2 interrupts */
+#define E100_MAX_TCB 1024
+
+/* RFD (receive frame descriptor) ring sizing limits. */
+#define E100_DEFAULT_RFD 64
+#define E100_MIN_RFD 8
+#define E100_MAX_RFD 1024
+
+#define E100_DEFAULT_XSUM true
+#define E100_DEFAULT_BER ZLOCK_MAX_ERRORS
+#define E100_DEFAULT_SPEED_DUPLEX 0
+#define E100_DEFAULT_FC 0
+#define E100_DEFAULT_IFS true
+#define E100_DEFAULT_UCODE true
+
+#define TX_THRSHLD 8
+
+/* IFS parameters */
+#define MIN_NUMBER_OF_TRANSMITS_100 1000
+#define MIN_NUMBER_OF_TRANSMITS_10 100
+
+#define E100_MAX_NIC 16
+
+#define E100_MAX_SCB_WAIT 100 /* Max udelays in wait_scb */
+#define E100_MAX_CU_IDLE_WAIT 50 /* Max udelays in wait_cus_idle */
+
+/* HWI feature related constant */
+#define HWI_MAX_LOOP 100
+#define MAX_SAME_RESULTS 3
+#define HWI_REGISTER_GRANULARITY 80 /* register granularity = 80 Cm */
+#define HWI_NEAR_END_BOUNDARY 1000 /* Near end is defined as < 10 meters */
+
+/* CPUSAVER_BUNDLE_MAX: Sets the maximum number of frames that will be bundled.
+ * In some situations, such as the TCP windowing algorithm, it may be
+ * better to limit the growth of the bundle size than let it go as
+ * high as it can, because that could cause too much added latency.
+ * The default is six, because this is the number of packets in the
+ * default TCP window size. A value of 1 would make CPUSaver indicate
+ * an interrupt for every frame received. If you do not want to put
+ * a limit on the bundle size, set this value to 0xFFFF.
+ */
+#define E100_DEFAULT_CPUSAVER_BUNDLE_MAX 6
+#define E100_DEFAULT_CPUSAVER_INTERRUPT_DELAY 0x600
+#define E100_DEFAULT_BUNDLE_SMALL_FR false
+
+/* end of configurables */
+
+/* ====================================================================== */
+/* hw */
+/* ====================================================================== */
+
+/* timeout for command completion */
+#define E100_CMD_WAIT 100 /* iterations */
+
+/*
+ * Driver-maintained statistics.  net_stats is the standard set reported to
+ * the network stack; the unsigned long fields count additional transmit/
+ * receive events (names appear to mirror the 8255x dump counters they are
+ * accumulated from -- see basic_cntr_t/ext_cntr_t/tco_cntr_t below).
+ */
+struct driver_stats {
+ struct net_device_stats net_stats;
+
+ unsigned long tx_late_col;
+ unsigned long tx_ok_defrd;
+ unsigned long tx_one_retry;
+ unsigned long tx_mt_one_retry;
+ unsigned long rcv_cdt_frames;
+ unsigned long xmt_fc_pkts;
+ unsigned long rcv_fc_pkts;
+ unsigned long rcv_fc_unsupported;
+ unsigned long xmt_tco_pkts;
+ unsigned long rcv_tco_pkts;
+ unsigned long rx_intr_pkts;
+};
+
+/* TODO: kill me when we can do C99 */
+#define false (0)
+#define true (1)
+
+/* Changed for 82558 and 82559 enhancements */
+/* defines for 82558/9 flow control CSR values */
+#define DFLT_FC_THLD 0x00 /* Rx FIFO threshold of 0.5KB free */
+#define DFLT_FC_CMD 0x00 /* FC Command in CSR */
+
+/* ====================================================================== */
+/* equates */
+/* ====================================================================== */
+
+/*
+ * These are general purpose defines
+ */
+
+/* Bit Mask definitions */
+#define BIT_0 0x0001
+#define BIT_1 0x0002
+#define BIT_2 0x0004
+#define BIT_3 0x0008
+#define BIT_4 0x0010
+#define BIT_5 0x0020
+#define BIT_6 0x0040
+#define BIT_7 0x0080
+#define BIT_8 0x0100
+#define BIT_9 0x0200
+#define BIT_10 0x0400
+#define BIT_11 0x0800
+#define BIT_12 0x1000
+#define BIT_13 0x2000
+#define BIT_14 0x4000
+#define BIT_15 0x8000
+#define BIT_28 0x10000000
+
+#define BIT_0_2 0x0007
+#define BIT_0_3 0x000F
+#define BIT_0_4 0x001F
+#define BIT_0_5 0x003F
+#define BIT_0_6 0x007F
+#define BIT_0_7 0x00FF
+#define BIT_0_8 0x01FF
+#define BIT_0_13 0x3FFF
+#define BIT_0_15 0xFFFF
+#define BIT_1_2 0x0006
+#define BIT_1_3 0x000E
+#define BIT_2_5 0x003C
+#define BIT_3_4 0x0018
+#define BIT_4_5 0x0030
+#define BIT_4_6 0x0070
+#define BIT_4_7 0x00F0
+#define BIT_5_7 0x00E0
+#define BIT_5_12 0x1FE0
+#define BIT_5_15 0xFFE0
+#define BIT_6_7 0x00c0
+#define BIT_7_11 0x0F80
+#define BIT_8_10 0x0700
+#define BIT_9_13 0x3E00
+#define BIT_12_15 0xF000
+#define BIT_8_15 0xFF00
+
+#define BIT_16_20 0x001F0000
+#define BIT_21_25 0x03E00000
+#define BIT_26_27 0x0C000000
+
+/* Transmit Threshold related constants */
+#define DEFAULT_TX_PER_UNDERRUN 20000
+
+#define MAX_MULTICAST_ADDRS 64
+#define MAX_FILTER 16
+
+#define FULL_DUPLEX 2
+#define HALF_DUPLEX 1
+
+/*
+ * These defines are specific to the 82557
+ */
+
+/* E100 PORT functions -- lower 4 bits */
+#define PORT_SOFTWARE_RESET 0
+#define PORT_SELFTEST 1
+#define PORT_SELECTIVE_RESET 2
+#define PORT_DUMP 3
+
+/* SCB Status Word bit definitions */
+/* Interrupt status/ack fields */
+/* ER and FCP interrupts for 82558 masks */
+#define SCB_STATUS_ACK_MASK BIT_8_15 /* Status Mask */
+#define SCB_STATUS_ACK_CX BIT_15 /* CU Completed Action Cmd */
+#define SCB_STATUS_ACK_FR BIT_14 /* RU Received A Frame */
+#define SCB_STATUS_ACK_CNA BIT_13 /* CU Became Inactive (IDLE) */
+#define SCB_STATUS_ACK_RNR BIT_12 /* RU Became Not Ready */
+#define SCB_STATUS_ACK_MDI BIT_11 /* MDI read or write done */
+#define SCB_STATUS_ACK_SWI BIT_10 /* S/W generated interrupt */
+#define SCB_STATUS_ACK_ER BIT_9 /* Early Receive */
+#define SCB_STATUS_ACK_FCP BIT_8 /* Flow Control Pause */
+
+/*- CUS Fields */
+#define SCB_CUS_MASK (BIT_6 | BIT_7) /* CUS 2-bit Mask */
+#define SCB_CUS_IDLE 0 /* CU Idle */
+#define SCB_CUS_SUSPEND BIT_6 /* CU Suspended */
+#define SCB_CUS_ACTIVE BIT_7 /* CU Active */
+
+/*- RUS Fields */
+#define SCB_RUS_IDLE 0 /* RU Idle */
+#define SCB_RUS_MASK BIT_2_5 /* RUS 3-bit Mask */
+#define SCB_RUS_SUSPEND BIT_2 /* RU Suspended */
+#define SCB_RUS_NO_RESOURCES BIT_3 /* RU Out Of Resources */
+#define SCB_RUS_READY BIT_4 /* RU Ready */
+#define SCB_RUS_SUSP_NO_RBDS (BIT_2 | BIT_5) /* RU No More RBDs */
+#define SCB_RUS_NO_RBDS (BIT_3 | BIT_5) /* RU No More RBDs */
+#define SCB_RUS_READY_NO_RBDS (BIT_4 | BIT_5) /* RU Ready, No RBDs */
+
+/* SCB Command Word bit definitions */
+/*- CUC fields */
+/* Changing mask to 4 bits */
+#define SCB_CUC_MASK BIT_4_7 /* CUC 4-bit Mask */
+#define SCB_CUC_NOOP 0
+#define SCB_CUC_START BIT_4 /* CU Start */
+#define SCB_CUC_RESUME BIT_5 /* CU Resume */
+#define SCB_CUC_UNKNOWN BIT_7 /* CU unknown command */
+/* Changed for 82558 enhancements */
+#define SCB_CUC_STATIC_RESUME (BIT_5 | BIT_7) /* 82558/9 Static Resume */
+#define SCB_CUC_DUMP_ADDR BIT_6 /* CU Dump Counters Address */
+#define SCB_CUC_DUMP_STAT (BIT_4 | BIT_6) /* CU Dump stat. counters */
+#define SCB_CUC_LOAD_BASE (BIT_5 | BIT_6) /* Load the CU base */
+/* Below was defined as BIT_4_7 */
+#define SCB_CUC_DUMP_RST_STAT BIT_4_6 /* CU Dump & reset statistics cntrs */
+
+/*- RUC fields */
+#define SCB_RUC_MASK BIT_0_2 /* RUC 3-bit Mask */
+#define SCB_RUC_START BIT_0 /* RU Start */
+#define SCB_RUC_RESUME BIT_1 /* RU Resume */
+#define SCB_RUC_ABORT BIT_2 /* RU Abort */
+#define SCB_RUC_LOAD_HDS (BIT_0 | BIT_2) /* Load RFD Header Data Size */
+#define SCB_RUC_LOAD_BASE (BIT_1 | BIT_2) /* Load the RU base */
+#define SCB_RUC_RBD_RESUME BIT_0_2 /* RBD resume */
+
+/* Interrupt fields (assuming byte addressing) */
+#define SCB_INT_MASK BIT_0 /* Mask interrupts */
+#define SCB_SOFT_INT BIT_1 /* Generate a S/W interrupt */
+/* Specific Interrupt Mask Bits (upper byte of SCB Command word) */
+#define SCB_FCP_INT_MASK BIT_2 /* Flow Control Pause */
+#define SCB_ER_INT_MASK BIT_3 /* Early Receive */
+#define SCB_RNR_INT_MASK BIT_4 /* RU Not Ready */
+#define SCB_CNA_INT_MASK BIT_5 /* CU Not Active */
+#define SCB_FR_INT_MASK BIT_6 /* Frame Received */
+#define SCB_CX_INT_MASK BIT_7 /* CU eXecution w/ I-bit done */
+#define SCB_BACHELOR_INT_MASK BIT_2_7 /* 82558 interrupt mask bits */
+
+#define SCB_GCR2_EEPROM_ACCESS_SEMAPHORE BIT_7
+
+/* EEPROM bit definitions */
+/*- EEPROM control register bits */
+#define EEPROM_FLAG_ASF 0x8000
+#define EEPROM_FLAG_GCL 0x4000
+
+#define EN_TRNF 0x10 /* Enable turnoff */
+#define EEDO 0x08 /* EEPROM data out */
+#define EEDI 0x04 /* EEPROM data in (set for writing data) */
+#define EECS 0x02 /* EEPROM chip select (1=hi, 0=lo) */
+#define EESK 0x01 /* EEPROM shift clock (1=hi, 0=lo) */
+
+/*- EEPROM opcodes */
+#define EEPROM_READ_OPCODE 06
+#define EEPROM_WRITE_OPCODE 05
+#define EEPROM_ERASE_OPCODE 07
+#define EEPROM_EWEN_OPCODE 19 /* Erase/write enable */
+#define EEPROM_EWDS_OPCODE 16 /* Erase/write disable */
+
+/*- EEPROM data locations */
+#define EEPROM_NODE_ADDRESS_BYTE_0 0
+#define EEPROM_COMPATIBILITY_WORD 3
+#define EEPROM_PWA_NO 8
+#define EEPROM_ID_WORD 0x0A
+#define EEPROM_CONFIG_ASF 0x0D
+#define EEPROM_SMBUS_ADDR 0x90
+
+#define EEPROM_SUM 0xbaba
+
+// Zero Locking Algorithm definitions:
+#define ZLOCK_ZERO_MASK 0x00F0
+#define ZLOCK_MAX_READS 50
+#define ZLOCK_SET_ZERO 0x2010
+/* Parenthesized so the expression expands safely when the macro is used
+ * inside larger arithmetic expressions. */
+#define ZLOCK_MAX_SLEEP (300 * HZ)
+#define ZLOCK_MAX_ERRORS 300
+
+/* E100 Action Commands */
+#define CB_IA_ADDRESS 1
+#define CB_CONFIGURE 2
+#define CB_MULTICAST 3
+#define CB_TRANSMIT 4
+#define CB_LOAD_MICROCODE 5
+#define CB_LOAD_FILTER 8
+#define CB_MAX_NONTX_CMD 9
+#define CB_IPCB_TRANSMIT 9
+
+/* Pre-defined Filter Bits */
+#define CB_FILTER_EL 0x80000000
+#define CB_FILTER_FIX 0x40000000
+#define CB_FILTER_ARP 0x08000000
+#define CB_FILTER_IA_MATCH 0x02000000
+
+/* Command Block (CB) Field Definitions */
+/*- CB Command Word */
+#define CB_EL_BIT BIT_15 /* CB EL Bit */
+#define CB_S_BIT BIT_14 /* CB Suspend Bit */
+#define CB_I_BIT BIT_13 /* CB Interrupt Bit */
+#define CB_TX_SF_BIT BIT_3 /* TX CB Flexible Mode */
+#define CB_CMD_MASK BIT_0_3 /* CB 4-bit CMD Mask */
+#define CB_CID_DEFAULT (0x1f << 8) /* CB 5-bit CID (max value) */
+
+/*- CB Status Word */
+#define CB_STATUS_MASK BIT_12_15 /* CB Status Mask (4-bits) */
+#define CB_STATUS_COMPLETE BIT_15 /* CB Complete Bit */
+#define CB_STATUS_OK BIT_13 /* CB OK Bit */
+#define CB_STATUS_VLAN BIT_12 /* CB Valn detected Bit */
+#define CB_STATUS_FAIL BIT_11 /* CB Fail (F) Bit */
+
+/*misc command bits */
+#define CB_TX_EOF_BIT BIT_15 /* TX CB/TBD EOF Bit */
+
+/* Config params */
+#define CB_CFIG_BYTE_COUNT 22 /* 22 config bytes */
+#define CB_CFIG_D102_BYTE_COUNT 10
+
+/* Receive Frame Descriptor Fields */
+
+/*- RFD Status Bits */
+#define RFD_RECEIVE_COLLISION BIT_0 /* Collision detected on Receive */
+#define RFD_IA_MATCH BIT_1 /* Indv Address Match Bit */
+#define RFD_RX_ERR BIT_4 /* RX_ERR pin on Phy was set */
+#define RFD_FRAME_TOO_SHORT BIT_7 /* Receive Frame Short */
+#define RFD_DMA_OVERRUN BIT_8 /* Receive DMA Overrun */
+#define RFD_NO_RESOURCES BIT_9 /* No Buffer Space */
+#define RFD_ALIGNMENT_ERROR BIT_10 /* Alignment Error */
+#define RFD_CRC_ERROR BIT_11 /* CRC Error */
+#define RFD_STATUS_OK BIT_13 /* RFD OK Bit */
+#define RFD_STATUS_COMPLETE BIT_15 /* RFD Complete Bit */
+
+/*- RFD Command Bits*/
+#define RFD_EL_BIT BIT_15 /* RFD EL Bit */
+#define RFD_S_BIT BIT_14 /* RFD Suspend Bit */
+#define RFD_H_BIT BIT_4 /* Header RFD Bit */
+#define RFD_SF_BIT BIT_3 /* RFD Flexible Mode */
+
+/*- RFD misc bits*/
+#define RFD_EOF_BIT BIT_15 /* RFD End-Of-Frame Bit */
+#define RFD_F_BIT BIT_14 /* RFD Buffer Fetch Bit */
+#define RFD_ACT_COUNT_MASK BIT_0_13 /* RFD Actual Count Mask */
+
+/* Receive Buffer Descriptor Fields*/
+#define RBD_EOF_BIT BIT_15 /* RBD End-Of-Frame Bit */
+#define RBD_F_BIT BIT_14 /* RBD Buffer Fetch Bit */
+#define RBD_ACT_COUNT_MASK BIT_0_13 /* RBD Actual Count Mask */
+
+#define SIZE_FIELD_MASK BIT_0_13 /* Size of the associated buffer */
+#define RBD_EL_BIT BIT_15 /* RBD EL Bit */
+
+/* Self Test Results*/
+#define CB_SELFTEST_FAIL_BIT BIT_12
+#define CB_SELFTEST_DIAG_BIT BIT_5
+#define CB_SELFTEST_REGISTER_BIT BIT_3
+#define CB_SELFTEST_ROM_BIT BIT_2
+
+#define CB_SELFTEST_ERROR_MASK ( \
+ CB_SELFTEST_FAIL_BIT | CB_SELFTEST_DIAG_BIT | \
+ CB_SELFTEST_REGISTER_BIT | CB_SELFTEST_ROM_BIT)
+
+/* adapter vendor & device ids */
+#define PCI_OHIO_BOARD 0x10f0 /* subdevice ID, Ohio dual port nic */
+
+/* Values for PCI_REV_ID_REGISTER values */
+#define D101A4_REV_ID 4 /* 82558 A4 stepping */
+#define D101B0_REV_ID 5 /* 82558 B0 stepping */
+#define D101MA_REV_ID 8 /* 82559 A0 stepping */
+#define D101S_REV_ID 9 /* 82559S A-step */
+#define D102_REV_ID 12
+#define D102C_REV_ID 13 /* 82550 step C */
+#define D102E_REV_ID 15
+
+/* ############Start of 82555 specific defines################## */
+
+#define PHY_82555_LED_SWITCH_CONTROL 0x1b /* 82555 led switch control register */
+
+/* 82555 led switch control reg. opcodes */
+#define PHY_82555_LED_NORMAL_CONTROL 0 // control back to the 8255X
+#define PHY_82555_LED_DRIVER_CONTROL BIT_2 // the driver is in control
+#define PHY_82555_LED_OFF BIT_2 // activity LED is off
+#define PHY_82555_LED_ON_559 (BIT_0 | BIT_2) // activity LED is on for 559 and later
+#define PHY_82555_LED_ON_PRE_559 (BIT_0 | BIT_1 | BIT_2) // activity LED is on for 558 and before
+
+/* State of the PHY activity LED, as driven by 'e100_blink_timer'. */
+enum led_state_e {
+ LED_OFF = 0,
+ LED_ON,
+};
+
+/* ############End of 82555 specific defines##################### */
+
+#define RFD_PARSE_BIT BIT_3
+#define RFD_TCP_PACKET 0x00
+#define RFD_UDP_PACKET 0x01
+#define TCPUDP_CHECKSUM_BIT_VALID BIT_4
+#define TCPUDP_CHECKSUM_VALID BIT_5
+#define CHECKSUM_PROTOCOL_MASK 0x03
+
+#define VLAN_SIZE 4
+#define CHKSUM_SIZE 2
+#define RFD_DATA_SIZE (ETH_FRAME_LEN + CHKSUM_SIZE + VLAN_SIZE)
+
+/* Bits for bdp->flags */
+#define DF_LINK_FC_CAP 0x00000001 /* Link is flow control capable */
+#define DF_CSUM_OFFLOAD 0x00000002
+#define DF_UCODE_LOADED 0x00000004
+#define USE_IPCB 0x00000008 /* set if using ipcb for transmits */
+#define IS_BACHELOR 0x00000010 /* set if 82558 or newer board */
+#define IS_ICH 0x00000020
+#define DF_SPEED_FORCED 0x00000040 /* set if speed is forced */
+#define LED_IS_ON 0x00000080 /* LED is turned ON by the driver */
+#define DF_LINK_FC_TX_ONLY 0x00000100 /* Received PAUSE frames are honored*/
+
+typedef struct net_device_stats net_dev_stats_t;
+
+/* needed macros */
+/* These macros use the bdp pointer. If you use them it better be defined */
+#define PREV_TCB_USED(X) ((X).tail ? (X).tail - 1 : bdp->params.TxDescriptors - 1)
+#define NEXT_TCB_TOUSE(X) ((((X) + 1) >= bdp->params.TxDescriptors) ? 0 : (X) + 1)
+#define TCB_TO_USE(X) ((X).tail)
+#define TCBS_AVAIL(X) (NEXT_TCB_TOUSE( NEXT_TCB_TOUSE((X).tail)) != (X).head)
+
+#define RFD_POINTER(skb,bdp) ((rfd_t *) (((unsigned char *)((skb)->data))-((bdp)->rfd_size)))
+#define SKB_RFD_STATUS(skb,bdp) ((RFD_POINTER((skb),(bdp)))->rfd_header.cb_status)
+
+/* ====================================================================== */
+/* 82557 */
+/* ====================================================================== */
+
+/* Changed for 82558 enhancement */
+/*
+ * SCB register-file extensions.  Each later controller generation adds
+ * registers past the common 82557 set; the union in scb_t below overlays
+ * the appropriate extension for the detected chip.
+ * NOTE(review): __attribute__((__packed__)) is written after the typedef
+ * name rather than after the struct body; some gcc versions attach it to
+ * the typedef only -- confirm the layout still matches the hardware map.
+ */
+typedef struct _d101_scb_ext_t {
+ u32 scb_rx_dma_cnt; /* Rx DMA byte count */
+ u8 scb_early_rx_int; /* Early Rx DMA byte count */
+ u8 scb_fc_thld; /* Flow Control threshold */
+ u8 scb_fc_xon_xoff; /* Flow Control XON/XOFF values */
+ u8 scb_pmdr; /* Power Mgmt. Driver Reg */
+} d101_scb_ext __attribute__ ((__packed__));
+
+/* Changed for 82559 enhancement */
+typedef struct _d101m_scb_ext_t {
+ u32 scb_rx_dma_cnt; /* Rx DMA byte count */
+ u8 scb_early_rx_int; /* Early Rx DMA byte count */
+ u8 scb_fc_thld; /* Flow Control threshold */
+ u8 scb_fc_xon_xoff; /* Flow Control XON/XOFF values */
+ u8 scb_pmdr; /* Power Mgmt. Driver Reg */
+ u8 scb_gen_ctrl; /* General Control */
+ u8 scb_gen_stat; /* General Status */
+ u16 scb_reserved; /* Reserved */
+ u32 scb_function_event; /* Cardbus Function Event */
+ u32 scb_function_event_mask; /* Cardbus Function Mask */
+ u32 scb_function_present_state; /* Cardbus Function state */
+ u32 scb_force_event; /* Cardbus Force Event */
+} d101m_scb_ext __attribute__ ((__packed__));
+
+/* Changed for 82550 enhancement */
+typedef struct _d102_scb_ext_t {
+ u32 scb_rx_dma_cnt; /* Rx DMA byte count */
+ u8 scb_early_rx_int; /* Early Rx DMA byte count */
+ u8 scb_fc_thld; /* Flow Control threshold */
+ u8 scb_fc_xon_xoff; /* Flow Control XON/XOFF values */
+ u8 scb_pmdr; /* Power Mgmt. Driver Reg */
+ u8 scb_gen_ctrl; /* General Control */
+ u8 scb_gen_stat; /* General Status */
+ u8 scb_gen_ctrl2;
+ u8 scb_reserved; /* Reserved */
+ u32 scb_scheduling_reg;
+ u32 scb_reserved2;
+ u32 scb_function_event; /* Cardbus Function Event */
+ u32 scb_function_event_mask; /* Cardbus Function Mask */
+ u32 scb_function_present_state; /* Cardbus Function state */
+ u32 scb_force_event; /* Cardbus Force Event */
+} d102_scb_ext __attribute__ ((__packed__));
+
+/*
+ * 82557 status control block.  This is memory mapped and hangs off the
+ * bdp; it is the main command/status interface to the controller.
+ */
+typedef struct _scb_t {
+ u16 scb_status; /* SCB Status register */
+ u8 scb_cmd_low; /* SCB Command register (low byte) */
+ u8 scb_cmd_hi; /* SCB Command register (high byte) */
+ u32 scb_gen_ptr; /* SCB General pointer */
+ u32 scb_port; /* PORT register */
+ u16 scb_flsh_cntrl; /* Flash Control register */
+ u16 scb_eprm_cntrl; /* EEPROM control register */
+ u32 scb_mdi_cntrl; /* MDI Control Register */
+ /* Changed for 82558 enhancement */
+ union {
+ u32 scb_rx_dma_cnt; /* Rx DMA byte count */
+ d101_scb_ext d101_scb; /* 82558/9 specific fields */
+ d101m_scb_ext d101m_scb; /* 82559 specific fields */
+ d102_scb_ext d102_scb;
+ } scb_ext;
+} scb_t __attribute__ ((__packed__));
+
+/* Self test
+ * This is used to dump results of the self test
+ */
+typedef struct _self_test_t {
+ u32 st_sign; /* Self Test Signature */
+ u32 st_result; /* Self Test Results */
+} self_test_t __attribute__ ((__packed__));
+
+/*
+ * Statistical Counters
+ */
+/* 82557 counters */
+typedef struct _basic_cntr_t {
+ u32 xmt_gd_frames; /* Good frames transmitted */
+ u32 xmt_max_coll; /* Fatal frames -- had max collisions */
+ u32 xmt_late_coll; /* Fatal frames -- had a late coll. */
+ u32 xmt_uruns; /* Xmit underruns (fatal or re-transmit) */
+ u32 xmt_lost_crs; /* Frames transmitted without CRS */
+ u32 xmt_deferred; /* Deferred transmits */
+ u32 xmt_sngl_coll; /* Transmits that had 1 and only 1 coll. */
+ u32 xmt_mlt_coll; /* Transmits that had multiple coll. */
+ u32 xmt_ttl_coll; /* Transmits that had 1+ collisions. */
+ u32 rcv_gd_frames; /* Good frames received */
+ u32 rcv_crc_errs; /* Aligned frames that had a CRC error */
+ u32 rcv_algn_errs; /* Receives that had alignment errors */
+ u32 rcv_rsrc_err; /* Good frame dropped cuz no resources */
+ u32 rcv_oruns; /* Overrun errors - bus was busy */
+ u32 rcv_err_coll; /* Received frms. that encountered coll. */
+ u32 rcv_shrt_frames; /* Received frames that were too short */
+} basic_cntr_t;
+
+/* 82558 extended statistic counters */
+typedef struct _ext_cntr_t {
+ u32 xmt_fc_frames;
+ u32 rcv_fc_frames;
+ u32 rcv_fc_unsupported;
+} ext_cntr_t;
+
+/* 82559 TCO statistic counters */
+typedef struct _tco_cntr_t {
+ u16 xmt_tco_frames;
+ u16 rcv_tco_frames;
+} tco_cntr_t;
+
+/* Structures to access the physical dump area */
+/* Use one of these types, according to the statistical counters mode,
+ to cast the pointer to the physical dump area and access the cmd_complete
+ DWORD. */
+
+/* 557-mode : only basic counters + cmd_complete */
+typedef struct _err_cntr_557_t {
+ basic_cntr_t basic_stats;
+ u32 cmd_complete;
+} err_cntr_557_t;
+
+/* 558-mode : basic + extended counters + cmd_complete */
+typedef struct _err_cntr_558_t {
+ basic_cntr_t basic_stats;
+ ext_cntr_t extended_stats;
+ u32 cmd_complete;
+} err_cntr_558_t;
+
+/* 559-mode : basic + extended + TCO counters + cmd_complete */
+typedef struct _err_cntr_559_t {
+ basic_cntr_t basic_stats;
+ ext_cntr_t extended_stats;
+ tco_cntr_t tco_stats;
+ u32 cmd_complete;
+} err_cntr_559_t;
+
+/* This typedef defines the struct needed to hold the largest number of counters */
+typedef err_cntr_559_t max_counters_t;
+
+/* Different statistical-counters mode the controller may be in */
+typedef enum _stat_mode_t {
+ E100_BASIC_STATS = 0, /* 82557 stats : 16 counters / 16 dw */
+ E100_EXTENDED_STATS, /* 82558 stats : 19 counters / 19 dw */
+ E100_TCO_STATS /* 82559 stats : 21 counters / 20 dw */
+} stat_mode_t;
+
+/* dump statistical counters complete codes */
+#define DUMP_STAT_COMPLETED 0xA005
+#define DUMP_RST_STAT_COMPLETED 0xA007
+
+/* Command Block (CB) Generic Header Structure*/
+typedef struct _cb_header_t {
+ u16 cb_status; /* Command Block Status */
+ u16 cb_cmd; /* Command Block Command */
+ u32 cb_lnk_ptr; /* Link To Next CB */
+} cb_header_t __attribute__ ((__packed__));
+
+/* Individual Address Command Block (IA_CB) */
+typedef struct _ia_cb_t {
+ cb_header_t ia_cb_hdr;
+ u8 ia_addr[ETH_ALEN];
+} ia_cb_t __attribute__ ((__packed__));
+
+/* Configure Command Block (CONFIG_CB)*/
+typedef struct _config_cb_t {
+ cb_header_t cfg_cbhdr;
+ u8 cfg_byte[CB_CFIG_BYTE_COUNT + CB_CFIG_D102_BYTE_COUNT];
+} config_cb_t __attribute__ ((__packed__));
+
+/* MultiCast Command Block (MULTICAST_CB)*/
+typedef struct _multicast_cb_t {
+ cb_header_t mc_cbhdr;
+ u16 mc_count; /* Number of multicast addresses */
+ u8 mc_addr[(ETH_ALEN * MAX_MULTICAST_ADDRS)];
+} mltcst_cb_t __attribute__ ((__packed__));
+
+#define UCODE_MAX_DWORDS 134
+/* Load Microcode Command Block (LOAD_UCODE_CB)*/
+typedef struct _load_ucode_cb_t {
+ cb_header_t load_ucode_cbhdr;
+ u32 ucode_dword[UCODE_MAX_DWORDS];
+} load_ucode_cb_t __attribute__ ((__packed__));
+
+/* Load Programmable Filter Data*/
+typedef struct _filter_cb_t {
+ cb_header_t filter_cb_hdr;
+ u32 filter_data[MAX_FILTER];
+} filter_cb_t __attribute__ ((__packed__));
+
+/* NON_TRANSMIT_CB -- Generic Non-Transmit Command Block
+ * (union of all command blocks other than transmit)
+ */
+typedef struct _nxmit_cb_t {
+ union {
+ config_cb_t config;
+ ia_cb_t setup;
+ load_ucode_cb_t load_ucode;
+ mltcst_cb_t multicast;
+ filter_cb_t filter;
+ } ntcb;
+} nxmit_cb_t __attribute__ ((__packed__));
+
+/*Block for queuing for postponed execution of the non-transmit commands*/
+typedef struct _nxmit_cb_entry_t {
+ struct list_head list_elem;
+ nxmit_cb_t *non_tx_cmd;
+ dma_addr_t dma_addr;
+ unsigned long expiration_time;
+} nxmit_cb_entry_t;
+
+/* States for postponed non tx commands execution */
+typedef enum _non_tx_cmd_state_t {
+ E100_NON_TX_IDLE = 0, /* No queued NON-TX commands */
+ E100_WAIT_TX_FINISH, /* Wait for completion of the TX activities */
+ E100_WAIT_NON_TX_FINISH /* Wait for completion of the non TX command */
+} non_tx_cmd_state_t;
+
+/* some defines for the ipcb */
+#define IPCB_IP_CHECKSUM_ENABLE BIT_4
+#define IPCB_TCPUDP_CHECKSUM_ENABLE BIT_5
+#define IPCB_TCP_PACKET BIT_6
+#define IPCB_LARGESEND_ENABLE BIT_7
+#define IPCB_HARDWAREPARSING_ENABLE BIT_0
+#define IPCB_INSERTVLAN_ENABLE BIT_1
+#define IPCB_IP_ACTIVATION_DEFAULT IPCB_HARDWAREPARSING_ENABLE
+
+/* Transmit Buffer Descriptor (TBD)*/
+typedef struct _tbd_t {
+ u32 tbd_buf_addr; /* Physical Transmit Buffer Address */
+ u16 tbd_buf_cnt; /* Actual Count Of Bytes */
+ u16 padd; /* padding */
+} tbd_t __attribute__ ((__packed__));
+
+/* d102 specific fields */
+typedef struct _tcb_ipcb_t {
+ u16 schedule_low;
+ u8 ip_schedule;
+ u8 ip_activation_high;
+ u16 vlan;
+ u8 ip_header_offset;
+ u8 tcp_header_offset;
+ union {
+ u32 sec_rec_phys_addr;
+ u32 tbd_zero_address;
+ } tbd_sec_addr;
+ union {
+ u16 sec_rec_size;
+ u16 tbd_zero_size;
+ } tbd_sec_size;
+ u16 total_tcp_payload;
+} tcb_ipcb_t __attribute__ ((__packed__));
+
+#define E100_TBD_ARRAY_SIZE (2+MAX_SKB_FRAGS)
+
+/* Transmit Command Block (TCB)*/
+struct _tcb_t {
+ cb_header_t tcb_hdr;
+ u32 tcb_tbd_ptr; /* TBD address */
+ u16 tcb_cnt; /* Data Bytes In TCB past header */
+ u8 tcb_thrshld; /* TX Threshold for FIFO Extender */
+ u8 tcb_tbd_num;
+
+ union {
+ tcb_ipcb_t ipcb; /* d102 ipcb fields */
+ tbd_t tbd_array[E100_TBD_ARRAY_SIZE];
+ } tcbu;
+
+ /* From here onward we can dump anything we want as long as the
+ * size of the total structure is a multiple of a paragraph
+ * boundary ( i.e. -16 bit aligned ).
+ */
+ tbd_t *tbd_ptr;
+
+ u32 tcb_tbd_dflt_ptr; /* TBD address for non-segmented packet */
+ u32 tcb_tbd_expand_ptr; /* TBD address for segmented packet */
+
+ struct sk_buff *tcb_skb; /* the associated socket buffer */
+ dma_addr_t tcb_phys; /* phys addr of the TCB */
+} __attribute__ ((__packed__));
+
+#define _TCB_T_
+typedef struct _tcb_t tcb_t;
+
+/* Receive Frame Descriptor (RFD) - will be using the simple model*/
+struct _rfd_t {
+ /* 8255x */
+ cb_header_t rfd_header;
+ u32 rfd_rbd_ptr; /* Receive Buffer Descriptor Addr */
+ u16 rfd_act_cnt; /* Number Of Bytes Received */
+ u16 rfd_sz; /* Number Of Bytes In RFD */
+ /* D102 aka Gamla */
+ u16 vlanid;
+ u8 rcvparserstatus;
+ u8 reserved;
+ u16 securitystatus;
+ u8 checksumstatus;
+ u8 zerocopystatus;
+ u8 pad[8]; /* data should be 16 byte aligned */
+ u8 data[RFD_DATA_SIZE];
+
+} __attribute__ ((__packed__));
+
+#define _RFD_T_
+typedef struct _rfd_t rfd_t;
+
+/* Receive Buffer Descriptor (RBD)*/
+typedef struct _rbd_t {
+ u16 rbd_act_cnt; /* Number Of Bytes Received */
+ u16 rbd_filler;
+ u32 rbd_lnk_addr; /* Link To Next RBD */
+ u32 rbd_rcb_addr; /* Receive Buffer Address */
+ u16 rbd_sz; /* Receive Buffer Size */
+ u16 rbd_filler1;
+} rbd_t __attribute__ ((__packed__));
+
+/*
+ * This structure is used to maintain a FIFO access to a resource that is
+ * maintained as a circular queue. The resource to be maintained is pointed
+ * to by the "data" field in the structure below. In this driver the TCBs,
+ * TBDs & RFDs are maintained as a circular queue & are managed thru this
+ * structure.
+ */
+typedef struct _buf_pool_t {
+ unsigned int head; /* index to first used resource */
+ unsigned int tail; /* index to last used resource */
+ void *data; /* points to resource pool */
+} buf_pool_t;
+
+/*Rx skb holding structure*/
+struct rx_list_elem {
+ struct list_head list_elem;
+ dma_addr_t dma_addr;
+ struct sk_buff *skb;
+};
+
+/* small state enums used by the main driver code */
+enum next_cu_cmd_e { RESUME_NO_WAIT = 0, RESUME_WAIT, START_WAIT };
+enum zlock_state_e { ZLOCK_INITIAL, ZLOCK_READING, ZLOCK_SLEEPING };
+enum tx_queue_stop_type { LONG_STOP = 0, SHORT_STOP };
+
+/* 64 bit aligned size */
+#define E100_SIZE_64A(X) ((sizeof(X) + 7) & ~0x7)
+
+/* DMA-able scratch area: self-test and statistics-dump buffers, each
+ * rounded up to a 64-bit multiple. */
+typedef struct _bd_dma_able_t {
+ char selftest[E100_SIZE_64A(self_test_t)];
+ char stats_counters[E100_SIZE_64A(max_counters_t)];
+} bd_dma_able_t;
+
+/* bit masks for bool parameters */
+#define PRM_XSUMRX 0x00000001
+#define PRM_UCODE 0x00000002
+#define PRM_FC 0x00000004
+#define PRM_IFS 0x00000008
+#define PRM_BUNDLE_SMALL 0x00000010
+
+/* per-adapter command line parameters (see also e100_private.params) */
+struct cfg_params {
+ int e100_speed_duplex;
+ int RxDescriptors;
+ int TxDescriptors;
+ int IntDelay;
+ int BundleMax;
+ int ber;
+ u32 b_params; /* bit mask of the PRM_* bool parameters above */
+};
+/* resources for the ethtool loopback test */
+struct ethtool_lpbk_data{
+ dma_addr_t dma_handle;
+ tcb_t *tcb;
+ rfd_t *rfd;
+
+};
+
+/*
+ * Per-adapter private state (referred to as "bdp" throughout the driver's
+ * function prototypes below).
+ */
+struct e100_private {
+ struct vlan_group *vlgrp;
+ u32 flags; /* board management flags */
+ u32 tx_per_underrun; /* number of good tx frames per underrun */
+ unsigned int tx_count; /* count of tx frames, so we can request an interrupt */
+ u8 tx_thld; /* stores transmit threshold */
+ u16 eeprom_size;
+ u32 pwa_no; /* PWA: xxxxxx-0xx */
+ u8 perm_node_address[ETH_ALEN];
+ struct list_head active_rx_list; /* list of rx buffers */
+ struct list_head rx_struct_pool; /* pool of rx buffer struct headers */
+ u16 rfd_size; /* size of the adapter's RFD struct */
+ int skb_req; /* number of skbs needed by the adapter */
+ u8 intr_mask; /* mask for interrupt status */
+
+ void *dma_able; /* dma allocated structs */
+ dma_addr_t dma_able_phys;
+ self_test_t *selftest; /* pointer to self test area */
+ dma_addr_t selftest_phys; /* phys addr of selftest */
+ max_counters_t *stats_counters; /* pointer to stats table */
+ dma_addr_t stat_cnt_phys; /* phys addr of stat counter area */
+
+ stat_mode_t stat_mode; /* statistics mode: extended, TCO, basic */
+ scb_t *scb; /* memory mapped ptr to 82557 scb */
+
+ tcb_t *last_tcb; /* pointer to last tcb sent */
+ buf_pool_t tcb_pool; /* adapter's TCB array */
+ dma_addr_t tcb_phys; /* phys addr of start of TCBs */
+
+ u16 cur_line_speed;
+ u16 cur_dplx_mode;
+
+ struct net_device *device;
+ struct pci_dev *pdev;
+ struct driver_stats drv_stats;
+
+ u8 rev_id; /* adapter PCI revision ID */
+
+ unsigned int phy_addr; /* address of PHY component */
+ unsigned int PhyId; /* ID of PHY component */
+ unsigned int PhyState; /* state for the fix squelch algorithm */
+ unsigned int PhyDelay; /* delay for the fix squelch algorithm */
+
+ /* Lock definitions for the driver */
+ spinlock_t bd_lock; /* board lock */
+ spinlock_t bd_non_tx_lock; /* Non transmit command lock */
+ spinlock_t config_lock; /* config block lock */
+ spinlock_t mdi_access_lock; /* mdi lock */
+
+ struct timer_list watchdog_timer; /* watchdog timer id */
+
+ /* non-tx commands parameters */
+ struct timer_list nontx_timer_id; /* non-tx timer id */
+ struct list_head non_tx_cmd_list;
+ non_tx_cmd_state_t non_tx_command_state;
+ nxmit_cb_entry_t *same_cmd_entry[CB_MAX_NONTX_CMD];
+
+ enum next_cu_cmd_e next_cu_cmd;
+
+ /* Zero Locking Algorithm data members */
+ enum zlock_state_e zlock_state;
+ u8 zlock_read_data[16]; /* number of times each value 0-15 was read */
+ u16 zlock_read_cnt; /* counts number of reads */
+ ulong zlock_sleep_cnt; /* keeps track of "sleep" time */
+
+ u8 config[CB_CFIG_BYTE_COUNT + CB_CFIG_D102_BYTE_COUNT];
+
+ /* IFS params */
+ u8 ifs_state;
+ u8 ifs_value;
+
+ struct cfg_params params; /* adapter's command line parameters */
+
+ u32 speed_duplex_caps; /* adapter's speed/duplex capabilities */
+
+ /* WOL params for ethtool */
+ u32 wolsupported;
+ u32 wolopts;
+ u16 ip_lbytes;
+ struct ethtool_lpbk_data loopback;
+ struct timer_list blink_timer; /* led blink timer id */
+
+#ifdef CONFIG_PM
+ u32 pci_state[16];
+#endif
+ char ifname[IFNAMSIZ];
+#ifdef E100_CU_DEBUG
+ u8 last_cmd;
+ u8 last_sub_cmd;
+#endif
+};
+
+/* values for the e100_speed_duplex command-line parameter */
+#define E100_AUTONEG 0
+#define E100_SPEED_10_HALF 1
+#define E100_SPEED_10_FULL 2
+#define E100_SPEED_100_HALF 3
+#define E100_SPEED_100_FULL 4
+
+/********* function prototypes *************/
+extern void e100_isolate_driver(struct e100_private *bdp);
+extern void e100_sw_reset(struct e100_private *bdp, u32 reset_cmd);
+extern u8 e100_start_cu(struct e100_private *bdp, tcb_t *tcb);
+extern void e100_free_non_tx_cmd(struct e100_private *bdp,
+				 nxmit_cb_entry_t *non_tx_cmd);
+extern nxmit_cb_entry_t *e100_alloc_non_tx_cmd(struct e100_private *bdp);
+extern unsigned char e100_exec_non_cu_cmd(struct e100_private *bdp,
+					  nxmit_cb_entry_t *cmd);
+extern unsigned char e100_selftest(struct e100_private *bdp, u32 *st_timeout,
+				   u32 *st_result);
+extern unsigned char e100_get_link_state(struct e100_private *bdp);
+extern unsigned char e100_wait_scb(struct e100_private *bdp);
+
+extern void e100_deisolate_driver(struct e100_private *bdp, u8 full_reset);
+extern unsigned char e100_configure_device(struct e100_private *bdp);
+#ifdef E100_CU_DEBUG
+extern unsigned char e100_cu_unknown_state(struct e100_private *bdp);
+#endif
+
+/* failure flags for e100_selftest()'s *st_result argument */
+#define ROM_TEST_FAIL 0x01
+#define REGISTER_TEST_FAIL 0x02
+#define SELF_TEST_FAIL 0x04
+#define TEST_TIMEOUT 0x08
+
+/* indices into the ethtool diagnostic self-test result array */
+enum test_offsets {
+	E100_EEPROM_TEST_FAIL = 0,
+	E100_CHIP_TIMEOUT,
+	E100_ROM_TEST_FAIL,
+	E100_REG_TEST_FAIL,
+	E100_MAC_TEST_FAIL,
+	E100_LPBK_MAC_FAIL,
+	E100_LPBK_PHY_FAIL,
+	E100_MAX_TEST_RES
+};
+
+#endif
--- /dev/null
+/*******************************************************************************
+
+
+ Copyright(c) 1999 - 2003 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc., 59
+ Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ The full GNU General Public License is included in this distribution in the
+ file called LICENSE.
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+*******************************************************************************/
+
+/**********************************************************************
+* *
+* INTEL CORPORATION *
+* *
+* This software is supplied under the terms of the license included *
+* above. All use of this driver must be in accordance with the terms *
+* of that license. *
+* *
+* Module Name: e100_config.c *
+* *
+* Abstract: Functions for configuring the network adapter. *
+* *
+* Environment: This file is intended to be specific to the Linux *
+* operating system. *
+* *
+**********************************************************************/
+#include "e100_config.h"
+
+static void e100_config_long_rx(struct e100_private *bdp, unsigned char enable);
+
+/* 82557 power-on configure-block defaults; byte 0 holds the byte count
+ * (CB_CFIG_BYTE_COUNT). The per-chip init routines below only change
+ * bytes that differ from these defaults. */
+static const u8 def_config[] = {
+	CB_CFIG_BYTE_COUNT,
+	0x08, 0x00, 0x00, 0x00, 0x00, 0x32, 0x07, 0x01,
+	0x00, 0x2e, 0x00, 0x60, 0x00, 0xf2, 0xc8, 0x00,
+	0x40, 0xf2, 0x80, 0x3f, 0x05
+};
+
+/**
+ * e100_config_init_82557 - config the 82557 adapter
+ * @bdp: adapter's private data struct
+ *
+ * This routine will initialize the 82557 configure block.
+ * All other init functions will only set values that are
+ * different from the 82557 default.
+ */
+static void __devinit
+e100_config_init_82557(struct e100_private *bdp)
+{
+	/* initialize config block */
+	memcpy(bdp->config, def_config, sizeof (def_config));
+	bdp->config[0] = CB_CFIG_BYTE_COUNT;	/* just in case */
+
+	e100_config_ifs(bdp);
+
+	/*
+	 * Enable extended statistical counters (82558 and up) and TCO counters
+	 * (82559 and up) and set the statistical counters' mode in bdp
+	 *
+	 * stat. mode       |  TCO stat. bit (2)  |  Extended stat. bit (5)
+	 * ------------------------------------------------------------------
+	 * Basic (557)      |       0             |        1
+	 * ------------------------------------------------------------------
+	 * Extended (558)   |       0             |        0
+	 * ------------------------------------------------------------------
+	 * TCO (559)        |       1             |        1
+	 * ------------------------------------------------------------------
+	 * Reserved         |       1             |        0
+	 * ------------------------------------------------------------------
+	 */
+	bdp->config[6] &= ~CB_CFIG_TCO_STAT;
+	bdp->config[6] |= CB_CFIG_EXT_STAT_DIS;
+	bdp->stat_mode = E100_BASIC_STATS;
+
+	/* Setup for MII or 503 operation. The CRS+CDT bit should only be set */
+	/* when operating in 503 mode. */
+	if (bdp->phy_addr == 32) {
+		bdp->config[8] &= ~CB_CFIG_503_MII;
+		bdp->config[15] |= CB_CFIG_CRS_OR_CDT;
+	} else {
+		bdp->config[8] |= CB_CFIG_503_MII;
+		bdp->config[15] &= ~CB_CFIG_CRS_OR_CDT;
+	}
+
+	/* layer the option/link dependent settings on top of the defaults */
+	e100_config_fc(bdp);
+	e100_config_force_dplx(bdp);
+	e100_config_promisc(bdp, false);
+	e100_config_mulcast_enbl(bdp, false);
+}
+
+/**
+ * e100_config_init_82558 - config the 82558-and-up adapters
+ * @bdp: adapter's private data struct
+ *
+ * Adjust the 82557 defaults for 82558+ features: MWI, extended TCBs,
+ * long receive frames, and the statistics mode (TCO counters on 82559
+ * and up, extended counters on 82558).
+ */
+static void __devinit
+e100_config_init_82558(struct e100_private *bdp)
+{
+	/* MWI enable. This should be turned on only if the adapter is a 82558/9
+	 * and if the PCI command reg. has enabled the MWI bit. */
+	bdp->config[3] |= CB_CFIG_MWI_EN;
+
+	/* clear the *disable* flag: use extended TCBs */
+	bdp->config[6] &= ~CB_CFIG_EXT_TCB_DIS;
+
+	if (bdp->rev_id >= D101MA_REV_ID) {
+		/* this is 82559 and up - enable TCO counters */
+		bdp->config[6] |= CB_CFIG_TCO_STAT;
+		bdp->config[6] |= CB_CFIG_EXT_STAT_DIS;
+		bdp->stat_mode = E100_TCO_STATS;
+
+		/* enable Rx checksum offload if requested, on pre-D102
+		 * silicon only; device id 0x1209 is excluded */
+		if ((bdp->rev_id < D102_REV_ID) &&
+		    (bdp->params.b_params & PRM_XSUMRX) &&
+		    (bdp->pdev->device != 0x1209)) {
+
+			bdp->flags |= DF_CSUM_OFFLOAD;
+			bdp->config[9] |= 1;
+		}
+	} else {
+		/* this is 82558 */
+		bdp->config[6] &= ~CB_CFIG_TCO_STAT;
+		bdp->config[6] &= ~CB_CFIG_EXT_STAT_DIS;
+		bdp->stat_mode = E100_EXTENDED_STATS;
+	}
+
+	/* accept long (e.g. VLAN-tagged) receive frames */
+	e100_config_long_rx(bdp, true);
+}
+
+/**
+ * e100_config_init_82550 - config the 82550/D102 adapter
+ * @bdp: adapter's private data struct
+ *
+ * Extend the configure block to the larger D102 layout and enable the
+ * receive-side features (extended RFDs, D102 receive mode) that the
+ * parsing and offloading support depends on.
+ */
+static void __devinit
+e100_config_init_82550(struct e100_private *bdp)
+{
+	/* The D102 chip allows for 32 config bytes.  This value is
+	 * supposed to be in Byte 0.  Just add the extra bytes to
+	 * what was already setup in the block. */
+	bdp->config[0] += CB_CFIG_D102_BYTE_COUNT;
+
+	/* now we need to enable the extended RFD.  When this is
+	 * enabled, the immediate receive data buffer starts at offset
+	 * 32 from the RFD base address, instead of at offset 16. */
+	bdp->config[7] |= CB_CFIG_EXTENDED_RFD;
+
+	/* put the chip into D102 receive mode.  This is necessary
+	 * for any parsing and offloading features. */
+	bdp->config[22] = CB_CFIG_RECEIVE_GAMLA_MODE;
+
+	/* set the flag if checksum offloading was enabled */
+	if (bdp->params.b_params & PRM_XSUMRX) {
+		bdp->flags |= DF_CSUM_OFFLOAD;
+	}
+}
+
+/* Initialize the adapter's configure block: set up the 82557 baseline,
+ * then apply 82558+ and D102-specific settings where the hardware
+ * supports them. */
+void __devinit
+e100_config_init(struct e100_private *bdp)
+{
+	e100_config_init_82557(bdp);
+
+	/* IS_BACHELOR is set on 82558 and newer parts (see e100_config_fc) */
+	if (bdp->flags & IS_BACHELOR)
+		e100_config_init_82558(bdp);
+
+	if (bdp->rev_id >= D102_REV_ID)
+		e100_config_init_82550(bdp);
+}
+
+/**
+ * e100_force_config - force a configure command
+ * @bdp: adapter's private data struct
+ *
+ * This routine will force a configure command to the adapter.
+ * The command will be executed in polled mode as interrupts
+ * are _disabled_ at this time.
+ *
+ * Returns:
+ *      true: if the configure command was successfully issued and completed
+ *      false: otherwise
+ */
+unsigned char
+e100_force_config(struct e100_private *bdp)
+{
+	spin_lock_bh(&(bdp->config_lock));
+
+	/* mark the whole block dirty so every byte gets (re)sent */
+	bdp->config[0] = CB_CFIG_BYTE_COUNT;
+	if (bdp->rev_id >= D102_REV_ID) {
+		/* The D102 chip allows for 32 config bytes. This value is
+		   supposed to be in Byte 0. Just add the extra bytes to
+		   what was already setup in the block. */
+		bdp->config[0] += CB_CFIG_D102_BYTE_COUNT;
+	}
+
+	spin_unlock_bh(&(bdp->config_lock));
+
+	// although we call config outside the lock, there is no
+	// race condition because config byte count has maximum value
+	return e100_config(bdp);
+}
+
+/**
+ * e100_config - issue a configure command
+ * @bdp: adapter's private data struct
+ *
+ * This routine will issue a configure command to the 82557.
+ * This command will be executed in polled mode as interrupts
+ * are _disabled_ at this time.
+ *
+ * Returns:
+ *      true: if the configure command was successfully issued and completed
+ *      false: otherwise
+ */
+unsigned char
+e100_config(struct e100_private *bdp)
+{
+	cb_header_t *pntcb_hdr;
+	unsigned char res = true;
+	nxmit_cb_entry_t *cmd;
+
+	/* config[0] is the dirty byte count maintained by E100_CONFIG();
+	 * zero means nothing changed since the last configure command */
+	if (bdp->config[0] == 0) {
+		goto exit;
+	}
+
+	if ((cmd = e100_alloc_non_tx_cmd(bdp)) == NULL) {
+		res = false;
+		goto exit;
+	}
+
+	pntcb_hdr = (cb_header_t *) cmd->non_tx_cmd;
+	pntcb_hdr->cb_cmd = __constant_cpu_to_le16(CB_CONFIGURE);
+
+	spin_lock_bh(&bdp->config_lock);
+
+	/* never send fewer than the minimum number of config bytes */
+	if (bdp->config[0] < CB_CFIG_MIN_PARAMS) {
+		bdp->config[0] = CB_CFIG_MIN_PARAMS;
+	}
+
+	/* Copy the device's config block to the device's memory */
+	memcpy(cmd->non_tx_cmd->ntcb.config.cfg_byte, bdp->config,
+	       bdp->config[0]);
+	/* reset number of bytes to config next time */
+	bdp->config[0] = 0;
+
+	spin_unlock_bh(&bdp->config_lock);
+
+	res = e100_exec_non_cu_cmd(bdp, cmd);
+
+exit:
+	if (netif_running(bdp->device))
+		netif_wake_queue(bdp->device);
+	return res;
+}
+
+/**
+ * e100_config_fc - config flow-control state
+ * @bdp: adapter's private data struct
+ *
+ * This routine will enable or disable flow control support in the adapter's
+ * config block. Flow control will be enabled only if requested using the
+ * command line option, and if the link is flow-control capable (both us and
+ * the link partner). But, if the link partner is capable of autoneg, but not
+ * capable of flow control, received PAUSE frames are still honored.
+ */
+void
+e100_config_fc(struct e100_private *bdp)
+{
+	unsigned char enable = false;
+
+	/* 82557 doesn't support fc. Don't touch this option */
+	if (!(bdp->flags & IS_BACHELOR))
+		return;
+
+	/* Enable fc if requested and if the link supports it */
+	if ((bdp->params.b_params & PRM_FC) && (bdp->flags &
+		(DF_LINK_FC_CAP | DF_LINK_FC_TX_ONLY))) {
+		enable = true;
+	}
+
+	spin_lock_bh(&(bdp->config_lock));
+
+	if (enable) {
+		if (bdp->flags & DF_LINK_FC_TX_ONLY) {
+			/* If link partner is capable of autoneg, but
+			 * not capable of flow control, received PAUSE
+			 * frames are still honored, i.e. transmitted
+			 * frames would be paused by incoming PAUSE
+			 * frames */
+			bdp->config[16] = DFLT_NO_FC_DELAY_LSB;
+			bdp->config[17] = DFLT_NO_FC_DELAY_MSB;
+			bdp->config[19] &= ~(CB_CFIG_FC_RESTOP | CB_CFIG_FC_RESTART);
+			bdp->config[19] |= CB_CFIG_FC_REJECT;
+			bdp->config[19] &= ~CB_CFIG_TX_FC_DIS;
+		} else {
+			/* full flow control: pause delay plus all Rx FC
+			 * options; Tx FC enabled (disable bit cleared) */
+			bdp->config[16] = DFLT_FC_DELAY_LSB;
+			bdp->config[17] = DFLT_FC_DELAY_MSB;
+			bdp->config[19] |= CB_CFIG_FC_OPTS;
+			bdp->config[19] &= ~CB_CFIG_TX_FC_DIS;
+		}
+	} else {
+		/* no flow control: clear Rx FC options, disable Tx FC */
+		bdp->config[16] = DFLT_NO_FC_DELAY_LSB;
+		bdp->config[17] = DFLT_NO_FC_DELAY_MSB;
+		bdp->config[19] &= ~CB_CFIG_FC_OPTS;
+		bdp->config[19] |= CB_CFIG_TX_FC_DIS;
+	}
+	/* bytes up to 19 are now dirty for the next configure command */
+	E100_CONFIG(bdp, 19);
+	spin_unlock_bh(&(bdp->config_lock));
+}
+
+/**
+ * e100_config_promisc - configure promiscuous mode
+ * @bdp: adapter's private data struct
+ * @enable: should we enable this option or not
+ *
+ * This routine will enable or disable promiscuous mode
+ * in the adapter's config block.
+ */
+void
+e100_config_promisc(struct e100_private *bdp, unsigned char enable)
+{
+	spin_lock_bh(&(bdp->config_lock));
+
+	/* if in promiscuous mode, save bad frames */
+	if (enable) {
+
+		if (!(bdp->config[6] & CB_CFIG_SAVE_BAD_FRAMES)) {
+			bdp->config[6] |= CB_CFIG_SAVE_BAD_FRAMES;
+			E100_CONFIG(bdp, 6);
+		}
+
+		/* byte 7 BIT_0 is CB_CFIG_DISC_SHORT_FRAMES; clear it so
+		 * short frames are kept while promiscuous */
+		if (bdp->config[7] & (u8) BIT_0) {
+			bdp->config[7] &= (u8) (~BIT_0);
+			E100_CONFIG(bdp, 7);
+		}
+
+		if (!(bdp->config[15] & CB_CFIG_PROMISCUOUS)) {
+			bdp->config[15] |= CB_CFIG_PROMISCUOUS;
+			E100_CONFIG(bdp, 15);
+		}
+
+	} else {		/* not in promiscuous mode */
+
+		if (bdp->config[6] & CB_CFIG_SAVE_BAD_FRAMES) {
+			bdp->config[6] &= ~CB_CFIG_SAVE_BAD_FRAMES;
+			E100_CONFIG(bdp, 6);
+		}
+
+		/* resume discarding short frames */
+		if (!(bdp->config[7] & (u8) BIT_0)) {
+			bdp->config[7] |= (u8) (BIT_0);
+			E100_CONFIG(bdp, 7);
+		}
+
+		if (bdp->config[15] & CB_CFIG_PROMISCUOUS) {
+			bdp->config[15] &= ~CB_CFIG_PROMISCUOUS;
+			E100_CONFIG(bdp, 15);
+		}
+	}
+
+	spin_unlock_bh(&(bdp->config_lock));
+}
+
+/**
+ * e100_config_mulcast_enbl - configure allmulti mode
+ * @bdp: adapter's private data struct
+ * @enable: non-zero to receive all multicast frames, zero to stop
+ *
+ * Turn reception of all multicast packets on or off in the adapter's
+ * config block, marking byte 21 dirty only when the bit actually flips.
+ */
+void
+e100_config_mulcast_enbl(struct e100_private *bdp, unsigned char enable)
+{
+	u8 cur;
+
+	spin_lock_bh(&(bdp->config_lock));
+
+	cur = bdp->config[21] & CB_CFIG_MULTICAST_ALL;
+	if (enable && !cur) {
+		bdp->config[21] |= CB_CFIG_MULTICAST_ALL;
+		E100_CONFIG(bdp, 21);
+	} else if (!enable && cur) {
+		bdp->config[21] &= ~CB_CFIG_MULTICAST_ALL;
+		E100_CONFIG(bdp, 21);
+	}
+
+	spin_unlock_bh(&(bdp->config_lock));
+}
+
+/**
+ * e100_config_ifs - configure the adaptive IFS parameter
+ * @bdp: adapter's private data struct
+ *
+ * Program the adaptive inter-frame-spacing value into config byte 2.
+ * IFS is only meaningful at half duplex; at full duplex the byte is
+ * forced to zero.
+ */
+void
+e100_config_ifs(struct e100_private *bdp)
+{
+	u8 ifs;
+
+	spin_lock_bh(&(bdp->config_lock));
+
+	/* a non-zero IFS value applies only in half-duplex operation */
+	ifs = (bdp->cur_dplx_mode == HALF_DUPLEX) ? (u8) bdp->ifs_value : 0;
+
+	if (bdp->config[2] != ifs) {
+		bdp->config[2] = ifs;
+		E100_CONFIG(bdp, 2);
+	}
+
+	spin_unlock_bh(&(bdp->config_lock));
+}
+
+/**
+ * e100_config_force_dplx - configure the forced full duplex mode
+ * @bdp: adapter's private data struct
+ *
+ * This routine will enable or disable force full duplex
+ * in the adapter's config block. If the PHY is 503, and
+ * the duplex is full, consider the adapter forced.
+ */
+void
+e100_config_force_dplx(struct e100_private *bdp)
+{
+	spin_lock_bh(&(bdp->config_lock));
+
+	/* We must force full duplex on if we are using PHY 0, and we are
+	 * supposed to run in FDX mode. We do this because the e100 has only
+	 * one FDX# input pin, and that pin will be connected to PHY 1.
+	 *
+	 * NOTE: the condition used to be
+	 *	(bdp->phy_addr == 0) && (bdp->cur_dplx_mode == 2)
+	 * which caused a performance problem at 10/full: the PHY was
+	 * forced to full duplex while the MAC was not, because
+	 * cur_dplx_mode was not being set to 2 by SetupPhy. The MAC is
+	 * now forced to full duplex whenever the user forces full
+	 * duplex; the rest of the fix is in the PhyDetect code. */
+	if ((bdp->params.e100_speed_duplex == E100_SPEED_10_FULL) ||
+	    (bdp->params.e100_speed_duplex == E100_SPEED_100_FULL) ||
+	    ((bdp->phy_addr == 32) && (bdp->cur_dplx_mode == FULL_DUPLEX))) {
+		if (!(bdp->config[19] & (u8) CB_CFIG_FORCE_FDX)) {
+			bdp->config[19] |= (u8) CB_CFIG_FORCE_FDX;
+			E100_CONFIG(bdp, 19);
+		}
+
+	} else {
+		if (bdp->config[19] & (u8) CB_CFIG_FORCE_FDX) {
+			bdp->config[19] &= (u8) (~CB_CFIG_FORCE_FDX);
+			E100_CONFIG(bdp, 19);
+		}
+	}
+
+	spin_unlock_bh(&(bdp->config_lock));
+}
+
+/**
+ * e100_config_long_rx - allow/disallow oversized receive frames
+ * @bdp: adapter's private data struct
+ * @enable: non-zero to accept long frames, zero to reject them
+ *
+ * Toggle reception of larger-than-standard packets (needed by VLAN
+ * implementations), touching config byte 18 only when the bit changes.
+ * NOTE(review): unlike its siblings this helper takes no config_lock;
+ * it appears to be reached only from the init path -- confirm.
+ */
+static void
+e100_config_long_rx(struct e100_private *bdp, unsigned char enable)
+{
+	u8 is_on = bdp->config[18] & CB_CFIG_LONG_RX_OK;
+
+	if (enable && !is_on) {
+		bdp->config[18] |= CB_CFIG_LONG_RX_OK;
+		E100_CONFIG(bdp, 18);
+	} else if (!enable && is_on) {
+		bdp->config[18] &= ~CB_CFIG_LONG_RX_OK;
+		E100_CONFIG(bdp, 18);
+	}
+}
+
+/**
+ * e100_config_wol
+ * @bdp: adapter's private data struct
+ *
+ * This sets configuration options for PHY and Magic Packet WoL
+ */
+void
+e100_config_wol(struct e100_private *bdp)
+{
+	spin_lock_bh(&(bdp->config_lock));
+
+	if (bdp->wolopts & WAKE_PHY) {
+		bdp->config[9] |= CB_LINK_STATUS_WOL;
+	}
+	else {
+		/* Disable PHY WoL */
+		bdp->config[9] &= ~CB_LINK_STATUS_WOL;
+	}
+
+	/* note: the magic-packet bit is a *disable* flag */
+	if (bdp->wolopts & WAKE_MAGIC) {
+		bdp->config[19] &= ~CB_DISABLE_MAGPAK_WAKE;
+	}
+	else {
+		/* Disable Magic Packet WoL */
+		bdp->config[19] |= CB_DISABLE_MAGPAK_WAKE;
+	}
+
+	/* E100_CONFIG records the highest dirty byte, so marking byte 19
+	 * also covers the change to byte 9 */
+	E100_CONFIG(bdp, 19);
+	spin_unlock_bh(&(bdp->config_lock));
+}
+
+/**
+ * e100_config_vlan_drop - enable/disable the D102 VLAN drop bit
+ * @bdp: adapter's private data struct
+ * @enable: non-zero to set the VLAN drop bit, zero to clear it
+ *
+ * Flip CB_CFIG_VLAN_DROP_ENABLE in config byte 22, marking the byte
+ * dirty only on a real change.
+ */
+void
+e100_config_vlan_drop(struct e100_private *bdp, unsigned char enable)
+{
+	u8 is_on;
+
+	spin_lock_bh(&(bdp->config_lock));
+
+	is_on = bdp->config[22] & CB_CFIG_VLAN_DROP_ENABLE;
+	if (enable && !is_on) {
+		bdp->config[22] |= CB_CFIG_VLAN_DROP_ENABLE;
+		E100_CONFIG(bdp, 22);
+	} else if (!enable && is_on) {
+		bdp->config[22] &= ~CB_CFIG_VLAN_DROP_ENABLE;
+		E100_CONFIG(bdp, 22);
+	}
+
+	spin_unlock_bh(&(bdp->config_lock));
+}
+
+/**
+ * e100_config_loopback_mode
+ * @bdp: adapter's private data struct
+ * @mode: loopback mode (NO_LOOPBACK, MAC_LOOPBACK or PHY_LOOPBACK)
+ *
+ * Program the loopback bits in config byte 10.
+ *
+ * Returns:
+ *      true: if the configure block was actually changed
+ *      false: otherwise (already in @mode, or @mode invalid)
+ */
+unsigned char
+e100_config_loopback_mode(struct e100_private *bdp, u8 mode)
+{
+	unsigned char bc_changed = false;
+	u8 config_byte;
+
+	spin_lock_bh(&(bdp->config_lock));
+
+	switch (mode) {
+	case NO_LOOPBACK:
+		config_byte = CB_CFIG_LOOPBACK_NORMAL;
+		break;
+	case MAC_LOOPBACK:
+		config_byte = CB_CFIG_LOOPBACK_INTERNAL;
+		break;
+	case PHY_LOOPBACK:
+		config_byte = CB_CFIG_LOOPBACK_EXTERNAL;
+		break;
+	default:
+		printk(KERN_NOTICE "e100: e100_config_loopback_mode: "
+		       "Invalid argument 'mode': %d\n", mode);
+		goto exit;
+	}
+
+	if ((bdp->config[10] & CB_CFIG_LOOPBACK_MODE) != config_byte) {
+
+		bdp->config[10] &= (~CB_CFIG_LOOPBACK_MODE);
+		bdp->config[10] |= config_byte;
+		E100_CONFIG(bdp, 10);
+		bc_changed = true;
+	}
+
+exit:
+	spin_unlock_bh(&(bdp->config_lock));
+	return bc_changed;
+}
+/**
+ * e100_config_tcb_ext_enable - switch extended TCB mode on or off
+ * @bdp: adapter's private data struct
+ * @enable: non-zero to use extended TCBs, zero to disable them
+ *
+ * The config bit is a *disable* flag, so it is cleared to enable
+ * extended TCBs. Returns true if the configure block was changed.
+ */
+unsigned char
+e100_config_tcb_ext_enable(struct e100_private *bdp, unsigned char enable)
+{
+	unsigned char changed = false;
+	u8 disabled;
+
+	spin_lock_bh(&(bdp->config_lock));
+
+	disabled = bdp->config[6] & CB_CFIG_EXT_TCB_DIS;
+	if (enable && disabled) {
+		bdp->config[6] &= ~CB_CFIG_EXT_TCB_DIS;
+		E100_CONFIG(bdp, 6);
+		changed = true;
+	} else if (!enable && !disabled) {
+		bdp->config[6] |= CB_CFIG_EXT_TCB_DIS;
+		E100_CONFIG(bdp, 6);
+		changed = true;
+	}
+
+	spin_unlock_bh(&(bdp->config_lock));
+
+	return changed;
+}
+/**
+ * e100_config_dynamic_tbd - switch dynamic TBD mode on or off
+ * @bdp: adapter's private data struct
+ * @enable: non-zero to enable dynamic TBDs, zero to disable them
+ *
+ * Returns true if the configure block was changed.
+ */
+unsigned char
+e100_config_dynamic_tbd(struct e100_private *bdp, unsigned char enable)
+{
+	unsigned char changed = false;
+	u8 is_on;
+
+	spin_lock_bh(&(bdp->config_lock));
+
+	is_on = bdp->config[7] & CB_CFIG_DYNTBD_EN;
+	if (enable && !is_on) {
+		bdp->config[7] |= CB_CFIG_DYNTBD_EN;
+		E100_CONFIG(bdp, 7);
+		changed = true;
+	} else if (!enable && is_on) {
+		bdp->config[7] &= ~CB_CFIG_DYNTBD_EN;
+		E100_CONFIG(bdp, 7);
+		changed = true;
+	}
+
+	spin_unlock_bh(&(bdp->config_lock));
+
+	return changed;
+}
+
--- /dev/null
+/*******************************************************************************
+
+
+ Copyright(c) 1999 - 2003 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc., 59
+ Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ The full GNU General Public License is included in this distribution in the
+ file called LICENSE.
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+*******************************************************************************/
+
+#ifndef _E100_CONFIG_INC_
+#define _E100_CONFIG_INC_
+
+#include "e100.h"
+
+/* Mark config byte X as dirty: config[0] always holds the number of
+ * bytes (highest dirty byte + 1) to send with the next configure
+ * command -- see e100_config(). */
+#define E100_CONFIG(bdp, X) ((bdp)->config[0] = max_t(u8, (bdp)->config[0], (X)+1))
+
+#define CB_CFIG_MIN_PARAMS 8
+
+/* byte 0 bit definitions*/
+#define CB_CFIG_BYTE_COUNT_MASK BIT_0_5 /* Byte count occupies bit 5-0 */
+
+/* byte 1 bit definitions*/
+#define CB_CFIG_RXFIFO_LIMIT_MASK BIT_0_4 /* RxFifo limit mask */
+#define CB_CFIG_TXFIFO_LIMIT_MASK BIT_4_7 /* TxFifo limit mask */
+
+/* byte 2 bit definitions -- ADAPTIVE_IFS*/
+
+/* word 3 bit definitions -- RESERVED*/
+/* Changed for 82558 enhancements */
+/* byte 3 bit definitions */
+#define CB_CFIG_MWI_EN BIT_0 /* Enable MWI on PCI bus */
+#define CB_CFIG_TYPE_EN BIT_1 /* Type Enable */
+#define CB_CFIG_READAL_EN BIT_2 /* Enable Read Align */
+#define CB_CFIG_TERMCL_EN BIT_3 /* Cache line write */
+
+/* byte 4 bit definitions*/
+#define CB_CFIG_RX_MIN_DMA_MASK BIT_0_6 /* Rx minimum DMA count mask */
+
+/* byte 5 bit definitions*/
+#define CB_CFIG_TX_MIN_DMA_MASK BIT_0_6 /* Tx minimum DMA count mask */
+#define CB_CFIG_DMBC_EN BIT_7 /* Enable Tx/Rx min. DMA counts */
+
+/* Changed for 82558 enhancements */
+/* byte 6 bit definitions
+ * NOTE: BIT_2 is overloaded (TNO interrupt vs. TCO statistics,
+ * generation dependent), and the *_DIS flags are active-high
+ * disables: set the bit to turn the feature off. */
+#define CB_CFIG_LATE_SCB BIT_0 /* Update SCB After New Tx Start */
+#define CB_CFIG_DIRECT_DMA_DIS BIT_1 /* Direct DMA mode */
+#define CB_CFIG_TNO_INT BIT_2 /* Tx Not OK Interrupt */
+#define CB_CFIG_TCO_STAT BIT_2 /* TCO statistics in 559 and above */
+#define CB_CFIG_CI_INT BIT_3 /* Command Complete Interrupt */
+#define CB_CFIG_EXT_TCB_DIS BIT_4 /* Extended TCB disable */
+#define CB_CFIG_EXT_STAT_DIS BIT_5 /* Extended Stats disable */
+#define CB_CFIG_SAVE_BAD_FRAMES BIT_7 /* Save Bad Frames Enabled */
+
+/* byte 7 bit definitions*/
+#define CB_CFIG_DISC_SHORT_FRAMES BIT_0 /* Discard Short Frames */
+#define CB_CFIG_DYNTBD_EN BIT_7 /* Enable dynamic TBD */
+/* Enable extended RFD's on D102 */
+#define CB_CFIG_EXTENDED_RFD BIT_5
+
+/* byte 8 bit definitions*/
+#define CB_CFIG_503_MII BIT_0 /* 503 vs. MII mode */
+
+/* byte 9 bit definitions -- pre-defined all zeros*/
+#define CB_LINK_STATUS_WOL BIT_5
+
+/* byte 10 bit definitions*/
+#define CB_CFIG_NO_SRCADR BIT_3 /* No Source Address Insertion */
+#define CB_CFIG_PREAMBLE_LEN BIT_4_5 /* Preamble Length */
+#define CB_CFIG_LOOPBACK_MODE BIT_6_7 /* Loopback Mode */
+#define CB_CFIG_LOOPBACK_NORMAL 0
+#define CB_CFIG_LOOPBACK_INTERNAL BIT_6
+#define CB_CFIG_LOOPBACK_EXTERNAL BIT_6_7
+
+/* byte 11 bit definitions*/
+#define CB_CFIG_LINEAR_PRIORITY BIT_0_2 /* Linear Priority */
+
+/* byte 12 bit definitions*/
+#define CB_CFIG_LINEAR_PRI_MODE BIT_0 /* Linear Priority mode */
+#define CB_CFIG_IFS_MASK BIT_4_7 /* Interframe Spacing mask */
+
+/* byte 13 bit definitions -- pre-defined all zeros*/
+
+/* byte 14 bit definitions -- pre-defined 0xf2*/
+
+/* byte 15 bit definitions*/
+#define CB_CFIG_PROMISCUOUS BIT_0 /* Promiscuous Mode Enable */
+#define CB_CFIG_BROADCAST_DIS BIT_1 /* Broadcast Mode Disable */
+#define CB_CFIG_CRS_OR_CDT BIT_7 /* CRS Or CDT */
+
+/* byte 16 bit definitions -- pre-defined all zeros*/
+#define DFLT_FC_DELAY_LSB 0x1f /* Delay for outgoing Pause frames */
+#define DFLT_NO_FC_DELAY_LSB 0x00 /* no flow control default value */
+
+/* byte 17 bit definitions -- pre-defined 0x40*/
+#define DFLT_FC_DELAY_MSB 0x01 /* Delay for outgoing Pause frames */
+#define DFLT_NO_FC_DELAY_MSB 0x40 /* no flow control default value */
+
+/* byte 18 bit definitions*/
+#define CB_CFIG_STRIPPING BIT_0 /* Stripping control */
+#define CB_CFIG_PADDING BIT_1 /* Padding Disabled */
+#define CB_CFIG_CRC_IN_MEM BIT_2 /* Transfer CRC To Memory */
+
+/* byte 19 bit definitions*/
+#define CB_CFIG_TX_ADDR_WAKE BIT_0 /* Address Wakeup */
+#define CB_DISABLE_MAGPAK_WAKE BIT_1 /* Magic Packet Wakeup disable */
+/* Changed TX_FC_EN to TX_FC_DIS because 0 enables, 1 disables. Jul 8, 1999 */
+#define CB_CFIG_TX_FC_DIS BIT_2 /* Tx Flow Control Disable */
+#define CB_CFIG_FC_RESTOP BIT_3 /* Rx Flow Control Restop */
+#define CB_CFIG_FC_RESTART BIT_4 /* Rx Flow Control Restart */
+#define CB_CFIG_FC_REJECT BIT_5 /* Rx Flow Control Reject */
+#define CB_CFIG_FC_OPTS (CB_CFIG_FC_RESTOP | CB_CFIG_FC_RESTART | CB_CFIG_FC_REJECT)
+
+/* end 82558/9 specifics */
+
+#define CB_CFIG_FORCE_FDX BIT_6 /* Force Full Duplex */
+#define CB_CFIG_FDX_ENABLE BIT_7 /* Full Duplex Enabled */
+
+/* byte 20 bit definitions*/
+#define CB_CFIG_MULTI_IA BIT_6 /* Multiple IA Addr */
+
+/* byte 21 bit definitions*/
+#define CB_CFIG_MULTICAST_ALL BIT_3 /* Multicast All */
+
+/* byte 22 bit defines */
+#define CB_CFIG_RECEIVE_GAMLA_MODE BIT_0 /* D102 receive mode */
+#define CB_CFIG_VLAN_DROP_ENABLE BIT_1 /* vlan stripping */
+
+#define CB_CFIG_LONG_RX_OK BIT_3
+
+/* argument values for e100_config_loopback_mode() */
+#define NO_LOOPBACK 0
+#define MAC_LOOPBACK 0x01
+#define PHY_LOOPBACK 0x02
+
+/* function prototypes */
+extern void e100_config_init(struct e100_private *bdp);
+extern unsigned char e100_force_config(struct e100_private *bdp);
+extern unsigned char e100_config(struct e100_private *bdp);
+extern void e100_config_fc(struct e100_private *bdp);
+extern void e100_config_promisc(struct e100_private *bdp, unsigned char enable);
+extern void e100_config_brdcast_dsbl(struct e100_private *bdp);
+extern void e100_config_mulcast_enbl(struct e100_private *bdp,
+				     unsigned char enable);
+extern void e100_config_ifs(struct e100_private *bdp);
+extern void e100_config_force_dplx(struct e100_private *bdp);
+extern u8 e100_config_loopback_mode(struct e100_private *bdp, u8 mode);
+extern u8 e100_config_dynamic_tbd(struct e100_private *bdp, u8 enable);
+extern u8 e100_config_tcb_ext_enable(struct e100_private *bdp, u8 enable);
+extern void e100_config_vlan_drop(struct e100_private *bdp, unsigned char enable);
+#endif /* _E100_CONFIG_INC_ */
--- /dev/null
+/*******************************************************************************
+
+
+ Copyright(c) 1999 - 2003 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc., 59
+ Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ The full GNU General Public License is included in this distribution in the
+ file called LICENSE.
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+*******************************************************************************/
+
+/**********************************************************************
+* *
+* INTEL CORPORATION *
+* *
+* This software is supplied under the terms of the license included *
+* above. All use of this driver must be in accordance with the terms *
+* of that license. *
+* *
+* Module Name: e100_eeprom.c *
+* *
+* Abstract: This module contains routines to read and write to a *
+* serial EEPROM *
+* *
+* Environment: This file is intended to be specific to the Linux *
+* operating system. *
+* *
+**********************************************************************/
+#include "e100.h"
+
+/* shorthands for the EEPROM control word and the D102 "general
+ * control 2" byte in the CSR */
+#define CSR_EEPROM_CONTROL_FIELD(bdp) ((bdp)->scb->scb_eprm_cntrl)
+
+#define CSR_GENERAL_CONTROL2_FIELD(bdp) \
+	((bdp)->scb->scb_ext.d102_scb.scb_gen_ctrl2)
+
+#define EEPROM_STALL_TIME 4	/* usecs between bit-bang steps (udelay) */
+#define EEPROM_CHECKSUM ((u16) 0xBABA)
+#define EEPROM_MAX_WORD_SIZE 256	/* sanity cap for size detection */
+
+void e100_eeprom_cleanup(struct e100_private *adapter);
+u16 e100_eeprom_calculate_chksum(struct e100_private *adapter);
+static void e100_eeprom_write_word(struct e100_private *adapter, u16 reg,
+				   u16 data);
+void e100_eeprom_write_block(struct e100_private *adapter, u16 start, u16 *data,
+			     u16 size);
+u16 e100_eeprom_size(struct e100_private *adapter);
+u16 e100_eeprom_read(struct e100_private *adapter, u16 reg);
+
+/* low-level bit-bang helpers for the serial EEPROM interface */
+static void shift_out_bits(struct e100_private *adapter, u16 data, u16 count);
+static u16 shift_in_bits(struct e100_private *adapter);
+static void raise_clock(struct e100_private *adapter, u16 *x);
+static void lower_clock(struct e100_private *adapter, u16 *x);
+static u16 eeprom_wait_cmd_done(struct e100_private *adapter);
+static void eeprom_stand_by(struct e100_private *adapter);
+
+//----------------------------------------------------------------------------------------
+// Procedure: eeprom_set_semaphore
+//
+// Description: This function set (write 1) Gamla EEPROM semaphore bit (bit 23 word 0x1C in the CSR).
+//
+// Arguments:
+//      Adapter - Adapter context
+//
+// Returns: true if success
+//          else return false
+//
+//----------------------------------------------------------------------------------------
+
+inline u8
+eeprom_set_semaphore(struct e100_private *adapter)
+{
+	u16 data = 0;
+	// allow roughly 10ms (HZ/100 jiffies, rounded up) before giving up
+	unsigned long expiration_time = jiffies + HZ / 100 + 1;
+
+	do {
+		// Get current value of General Control 2
+		data = readb(&CSR_GENERAL_CONTROL2_FIELD(adapter));
+
+		// Set bit 23 word 0x1C in the CSR.
+		data |= SCB_GCR2_EEPROM_ACCESS_SEMAPHORE;
+		writeb(data, &CSR_GENERAL_CONTROL2_FIELD(adapter));
+
+		// Check to see if this bit set or not.
+		data = readb(&CSR_GENERAL_CONTROL2_FIELD(adapter));
+
+		if (data & SCB_GCR2_EEPROM_ACCESS_SEMAPHORE) {
+			return true;
+		}
+
+		// semaphore not ours yet: retry politely while time remains
+		if (time_before(jiffies, expiration_time))
+			yield();
+		else
+			return false;
+
+	} while (true);
+}
+
+//----------------------------------------------------------------------------------------
+// Procedure: eeprom_reset_semaphore
+//
+// Description: Clears (writes 0 to) the Gamla EEPROM semaphore bit
+// (bit 23, word 0x1C in the CSR), releasing EEPROM access.
+//
+// Arguments: struct e100_private * adapter - Adapter context
+//----------------------------------------------------------------------------------------
+
+inline void
+eeprom_reset_semaphore(struct e100_private *adapter)
+{
+	u16 ctrl;
+
+	ctrl = readb(&CSR_GENERAL_CONTROL2_FIELD(adapter));
+	ctrl &= ~SCB_GCR2_EEPROM_ACCESS_SEMAPHORE;
+	writeb(ctrl, &CSR_GENERAL_CONTROL2_FIELD(adapter));
+}
+
+//----------------------------------------------------------------------------------------
+// Procedure: e100_eeprom_size
+//
+// Description: This routine determines the size of the EEPROM. This value should be
+// checked for validity - ie. is it too big or too small. The size returned
+// is then passed to the read/write functions.
+//
+// Returns:
+// Size of the eeprom, or zero if an error occurred
+//----------------------------------------------------------------------------------------
+u16
+e100_eeprom_size(struct e100_private *adapter)
+{
+	u16 x, size = 1;	// must be one to accumulate a product
+
+	// if we've already stored this data, read from memory
+	if (adapter->eeprom_size) {
+		return adapter->eeprom_size;
+	}
+	// otherwise, read from the eeprom
+	// Set EEPROM semaphore.
+	if (adapter->rev_id >= D102_REV_ID) {
+		if (!eeprom_set_semaphore(adapter))
+			return 0;
+	}
+	// enable the eeprom by setting EECS.
+	x = readw(&CSR_EEPROM_CONTROL_FIELD(adapter));
+	x &= ~(EEDI | EEDO | EESK);
+	x |= EECS;
+	writew(x, &CSR_EEPROM_CONTROL_FIELD(adapter));
+
+	// write the read opcode
+	shift_out_bits(adapter, EEPROM_READ_OPCODE, 3);
+
+	// experiment to discover the size of the eeprom. request register zero
+	// and wait for the eeprom to tell us it has accepted the entire address.
+	x = readw(&CSR_EEPROM_CONTROL_FIELD(adapter));
+	do {
+		size *= 2;	// each bit of address doubles eeprom size
+		x |= EEDO;	// set bit to detect "dummy zero"
+		x &= ~EEDI;	// address consists of all zeros
+
+		writew(x, &CSR_EEPROM_CONTROL_FIELD(adapter));
+		// dummy status read -- apparently flushes the write
+		// before the stall; confirm against other CSR accessors
+		readw(&(adapter->scb->scb_status));
+		udelay(EEPROM_STALL_TIME);
+		raise_clock(adapter, &x);
+		lower_clock(adapter, &x);
+
+		// check for "dummy zero"
+		x = readw(&CSR_EEPROM_CONTROL_FIELD(adapter));
+		// bail out rather than loop forever on broken hardware
+		if (size > EEPROM_MAX_WORD_SIZE) {
+			size = 0;
+			break;
+		}
+	} while (x & EEDO);
+
+	// read in the value requested
+	(void) shift_in_bits(adapter);
+	e100_eeprom_cleanup(adapter);
+
+	// Clear EEPROM Semaphore.
+	if (adapter->rev_id >= D102_REV_ID) {
+		eeprom_reset_semaphore(adapter);
+	}
+
+	return size;
+}
+
+//----------------------------------------------------------------------------------------
+// Procedure: eeprom_address_size
+//
+// Description: determines the number of bits in an address for the eeprom;
+// acceptable sizes are powers of two (64, 128, and 256)
+// Arguments: size of the eeprom in words
+// Returns: bits in an address for that size eeprom
+//----------------------------------------------------------------------------------------
+
+static inline int
+eeprom_address_size(u16 size)
+{
+	/* size is a power of two, so the address width is log2(size),
+	 * i.e. the zero-based position of its single set bit */
+	return ffs((int) size) - 1;
+}
+
+//----------------------------------------------------------------------------------------
+// Procedure: e100_eeprom_read
+//
+// Description: This routine serially reads one word out of the EEPROM.
+//
+// Arguments:
+// adapter - our adapter context
+// reg - EEPROM word to read.
+//
+// Returns:
+// Contents of EEPROM word (reg), or 0 if the semaphore could not be taken.
+//----------------------------------------------------------------------------------------
+
+u16
+e100_eeprom_read(struct e100_private *adapter, u16 reg)
+{
+ u16 x, data, bits;
+
+ // Set EEPROM semaphore.
+ if (adapter->rev_id >= D102_REV_ID) {
+ if (!eeprom_set_semaphore(adapter))
+ return 0; /* NOTE(review): 0 is also a valid EEPROM word - callers cannot tell this failure apart */
+ }
+ // cache the eeprom size on first use (it is initialized to zero)
+ if (!adapter->eeprom_size)
+ adapter->eeprom_size = e100_eeprom_size(adapter);
+
+ bits = eeprom_address_size(adapter->eeprom_size);
+
+ // select EEPROM, reset bits, set EECS
+ x = readw(&CSR_EEPROM_CONTROL_FIELD(adapter));
+
+ x &= ~(EEDI | EEDO | EESK);
+ x |= EECS;
+ writew(x, &CSR_EEPROM_CONTROL_FIELD(adapter));
+
+ // write the read opcode and register number in that order
+ // The opcode is 3bits in length, reg is 'bits' bits long
+ shift_out_bits(adapter, EEPROM_READ_OPCODE, 3);
+ shift_out_bits(adapter, reg, bits);
+
+ // Now read the data (16 bits) in from the selected EEPROM word
+ data = shift_in_bits(adapter);
+
+ e100_eeprom_cleanup(adapter);
+
+ // Clear EEPROM Semaphore.
+ if (adapter->rev_id >= D102_REV_ID) {
+ eeprom_reset_semaphore(adapter);
+ }
+
+ return data;
+}
+
+//----------------------------------------------------------------------------------------
+// Procedure: shift_out_bits
+//
+// Description: This routine shifts data bits out to the EEPROM.
+//
+// Arguments:
+// data - data to send to the EEPROM.
+// count - number of data bits to shift out (most significant bit first).
+//
+// Returns: (none)
+//----------------------------------------------------------------------------------------
+
+static void
+shift_out_bits(struct e100_private *adapter, u16 data, u16 count)
+{
+ u16 x, mask;
+
+ mask = 1 << (count - 1); // start at the MSB of the 'count'-bit value
+ x = readw(&CSR_EEPROM_CONTROL_FIELD(adapter));
+ x &= ~(EEDO | EEDI);
+
+ do {
+ x &= ~EEDI;
+ if (data & mask)
+ x |= EEDI;
+
+ writew(x, &CSR_EEPROM_CONTROL_FIELD(adapter));
+ readw(&(adapter->scb->scb_status)); /* flush command to card */
+ udelay(EEPROM_STALL_TIME);
+ raise_clock(adapter, &x); // EEPROM samples EEDI on the clock pulse
+ lower_clock(adapter, &x);
+ mask = mask >> 1;
+ } while (mask);
+
+ x &= ~EEDI; // leave the data-in line low when done
+ writew(x, &CSR_EEPROM_CONTROL_FIELD(adapter));
+}
+
+//----------------------------------------------------------------------------------------
+// Procedure: raise_clock
+//
+// Description: This routine raises the EEPROM's clock input (EESK)
+//
+// Arguments:
+// adapter - our adapter context
+// x - Ptr to the EEPROM control register's current (cached) value
+//
+// Returns: (none)
+//----------------------------------------------------------------------------------------
+
+void
+raise_clock(struct e100_private *adapter, u16 *x)
+{
+ *x = *x | EESK; // set the clock bit in the cached register value
+ writew(*x, &CSR_EEPROM_CONTROL_FIELD(adapter));
+ readw(&(adapter->scb->scb_status)); /* flush command to card */
+ udelay(EEPROM_STALL_TIME); // hold the clock high for the EEPROM's stall time
+}
+
+//----------------------------------------------------------------------------------------
+// Procedure: lower_clock
+//
+// Description: This routine lowers the EEPROM's clock input (EESK)
+//
+// Arguments:
+// adapter - our adapter context
+// x - Ptr to the EEPROM control register's current (cached) value
+//
+// Returns: (none)
+//----------------------------------------------------------------------------------------
+
+void
+lower_clock(struct e100_private *adapter, u16 *x)
+{
+ *x = *x & ~EESK; // clear the clock bit in the cached register value
+ writew(*x, &CSR_EEPROM_CONTROL_FIELD(adapter));
+ readw(&(adapter->scb->scb_status)); /* flush command to card */
+ udelay(EEPROM_STALL_TIME); // hold the clock low for the EEPROM's stall time
+}
+
+//----------------------------------------------------------------------------------------
+// Procedure: shift_in_bits
+//
+// Description: This routine shifts data bits in from the EEPROM (16 bits, MSB first).
+//
+// Arguments:
+// adapter - our adapter context
+//
+// Returns:
+// The contents of that particular EEPROM word
+//----------------------------------------------------------------------------------------
+
+static u16
+shift_in_bits(struct e100_private *adapter)
+{
+ u16 x, d, i;
+
+ x = readw(&CSR_EEPROM_CONTROL_FIELD(adapter));
+ x &= ~(EEDO | EEDI);
+ d = 0;
+
+ for (i = 0; i < 16; i++) {
+ d <<= 1; // make room for the next bit (MSB arrives first)
+ raise_clock(adapter, &x); // EEPROM drives EEDO on the clock pulse
+
+ x = readw(&CSR_EEPROM_CONTROL_FIELD(adapter));
+
+ x &= ~EEDI;
+ if (x & EEDO)
+ d |= 1;
+
+ lower_clock(adapter, &x);
+ }
+
+ return d;
+}
+
+//----------------------------------------------------------------------------------------
+// Procedure: e100_eeprom_cleanup
+//
+// Description: This routine returns the EEPROM to an idle state by deselecting
+// it (clearing EECS and EEDI) and pulsing the clock once.
+//----------------------------------------------------------------------------------------
+
+void
+e100_eeprom_cleanup(struct e100_private *adapter)
+{
+ u16 x;
+
+ x = readw(&CSR_EEPROM_CONTROL_FIELD(adapter));
+
+ x &= ~(EECS | EEDI);
+ writew(x, &CSR_EEPROM_CONTROL_FIELD(adapter));
+
+ raise_clock(adapter, &x);
+ lower_clock(adapter, &x);
+}
+
+//**********************************************************************************
+// Procedure: e100_eeprom_calculate_chksum
+//
+// Description: Calculates the EEPROM checksum; the caller is responsible for
+// writing it to the EEPROM (see e100_eeprom_write_block).
+// It calculates the checksum according to the formula:
+// Checksum = EEPROM_CHECKSUM - (sum of all words except the last).
+//
+//-----------------------------------------------------------------------------------
+u16
+e100_eeprom_calculate_chksum(struct e100_private *adapter)
+{
+ u16 idx, xsum_index, checksum = 0;
+
+ // cache the eeprom size on first use (it is initialized to zero)
+ if (!adapter->eeprom_size)
+ adapter->eeprom_size = e100_eeprom_size(adapter);
+
+ xsum_index = adapter->eeprom_size - 1; // the last word holds the checksum itself
+ for (idx = 0; idx < xsum_index; idx++)
+ checksum += e100_eeprom_read(adapter, idx);
+
+ checksum = EEPROM_CHECKSUM - checksum;
+ return checksum;
+}
+
+//----------------------------------------------------------------------------------------
+// Procedure: e100_eeprom_write_word
+//
+// Description: This routine writes a word to a specific EEPROM location, without
+// taking the EEPROM semaphore and without updating the checksum.
+// Use e100_eeprom_write_block for the EEPROM update
+// Arguments: reg - The EEPROM word that we are going to write to.
+// data - The data (word) that we are going to write to the EEPROM.
+//----------------------------------------------------------------------------------------
+static void
+e100_eeprom_write_word(struct e100_private *adapter, u16 reg, u16 data)
+{
+ u16 x;
+ u16 bits;
+
+ bits = eeprom_address_size(adapter->eeprom_size);
+
+ /* select EEPROM, mask off ASIC and reset bits, set EECS */
+ x = readw(&CSR_EEPROM_CONTROL_FIELD(adapter));
+ x &= ~(EEDI | EEDO | EESK);
+ writew(x, &CSR_EEPROM_CONTROL_FIELD(adapter));
+ readw(&(adapter->scb->scb_status)); /* flush command to card */
+ udelay(EEPROM_STALL_TIME);
+ x |= EECS;
+ writew(x, &CSR_EEPROM_CONTROL_FIELD(adapter));
+
+ shift_out_bits(adapter, EEPROM_EWEN_OPCODE, 5); /* enable writes (EWEN) */
+ shift_out_bits(adapter, reg, (u16) (bits - 2));
+ if (!eeprom_wait_cmd_done(adapter))
+ return;
+
+ /* send the write opcode to the EEPROM */
+ shift_out_bits(adapter, EEPROM_WRITE_OPCODE, 3);
+
+ /* select which word in the EEPROM that we are writing to */
+ shift_out_bits(adapter, reg, bits);
+
+ /* write the data to the selected EEPROM word */
+ shift_out_bits(adapter, data, 16);
+ if (!eeprom_wait_cmd_done(adapter))
+ return;
+
+ shift_out_bits(adapter, EEPROM_EWDS_OPCODE, 5); /* disable writes again (EWDS) */
+ shift_out_bits(adapter, reg, (u16) (bits - 2));
+ if (!eeprom_wait_cmd_done(adapter))
+ return;
+
+ e100_eeprom_cleanup(adapter);
+}
+
+//----------------------------------------------------------------------------------------
+// Procedure: e100_eeprom_write_block
+//
+// Description: This routine writes a block of words starting from specified EEPROM
+// location and updates checksum
+// Arguments: start - The first EEPROM word that we are going to write to.
+// data - Array of words to write; size - number of words in data.
+//----------------------------------------------------------------------------------------
+void
+e100_eeprom_write_block(struct e100_private *adapter, u16 start, u16 *data,
+ u16 size)
+{
+ u16 checksum;
+ u16 i;
+
+ if (!adapter->eeprom_size)
+ adapter->eeprom_size = e100_eeprom_size(adapter);
+
+ // Set EEPROM semaphore.
+ if (adapter->rev_id >= D102_REV_ID) {
+ if (!eeprom_set_semaphore(adapter))
+ return;
+ }
+
+ for (i = 0; i < size; i++) {
+ e100_eeprom_write_word(adapter, start + i, data[i]);
+ }
+ // Update checksum, stored in the last EEPROM word
+ checksum = e100_eeprom_calculate_chksum(adapter);
+ e100_eeprom_write_word(adapter, (adapter->eeprom_size - 1), checksum);
+
+ // Clear EEPROM Semaphore.
+ if (adapter->rev_id >= D102_REV_ID) {
+ eeprom_reset_semaphore(adapter);
+ }
+}
+
+//----------------------------------------------------------------------------------------
+// Procedure: eeprom_wait_cmd_done
+//
+// Description: This routine waits for the EEPROM to finish its command.
+// Specifically, it waits for EEDO (data out) to go high.
+// Returns: true - If the command finished
+// false - If the command never finished (EEDO stayed low)
+//----------------------------------------------------------------------------------------
+static u16
+eeprom_wait_cmd_done(struct e100_private *adapter)
+{
+ u16 x;
+ unsigned long expiration_time = jiffies + HZ / 100 + 1; // ~10ms timeout
+
+ eeprom_stand_by(adapter);
+
+ do {
+ rmb();
+ x = readw(&CSR_EEPROM_CONTROL_FIELD(adapter));
+ if (x & EEDO)
+ return true;
+ if (time_before(jiffies, expiration_time))
+ yield(); // give up the CPU while the EEPROM is busy
+ else
+ return false;
+ } while (true);
+}
+
+//----------------------------------------------------------------------------------------
+// Procedure: eeprom_stand_by
+//
+// Description: This routine lowers the EEPROM chip select (EECS) for a few
+// microseconds, then re-asserts it.
+//----------------------------------------------------------------------------------------
+static void
+eeprom_stand_by(struct e100_private *adapter)
+{
+ u16 x;
+
+ x = readw(&CSR_EEPROM_CONTROL_FIELD(adapter));
+ x &= ~(EECS | EESK);
+ writew(x, &CSR_EEPROM_CONTROL_FIELD(adapter));
+ readw(&(adapter->scb->scb_status)); /* flush command to card */
+ udelay(EEPROM_STALL_TIME);
+ x |= EECS; // re-select the EEPROM
+ writew(x, &CSR_EEPROM_CONTROL_FIELD(adapter));
+ readw(&(adapter->scb->scb_status)); /* flush command to card */
+ udelay(EEPROM_STALL_TIME);
+}
--- /dev/null
+/*******************************************************************************
+
+
+ Copyright(c) 1999 - 2003 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc., 59
+ Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ The full GNU General Public License is included in this distribution in the
+ file called LICENSE.
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+*******************************************************************************/
+
+/**********************************************************************
+* *
+* INTEL CORPORATION *
+* *
+* This software is supplied under the terms of the license included *
+* above. All use of this driver must be in accordance with the terms *
+* of that license. *
+* *
+* Module Name: e100_main.c *
+* *
+* Abstract: Functions for the driver entry points like load, *
+* unload, open and close. All board specific calls made *
+* by the network interface section of the driver. *
+* *
+* Environment: This file is intended to be specific to the Linux *
+* operating system. *
+* *
+**********************************************************************/
+
+/* Change Log
+ *
+ * 2.2.21 02/11/03
+ * o Removed marketing brand strings. Instead, Using generic string
+ * "Intel(R) PRO/100 Network Connection" for all adapters.
+ * o Implemented ethtool -S option
+ * o Strip /proc/net/PRO_LAN_Adapters files for kernel driver
+ * o Bug fix: Read wrong byte in EEPROM when offset is odd number
+ * o Bug fix: PHY loopback test fails on ICH devices
+ * o Bug fix: System panic on e100_close when repeating Hot Remove and
+ * Add in a team
+ * o Bug fix: Linux Bonding driver claims adapter's link loss because of
+ * not updating last_rx field
+ * o Bug fix: e100 does not check validity of MAC address
+ * o New feature: added ICH5 support
+ *
+ * 2.1.27 11/20/02
+ * o Bug fix: Device command timeout due to SMBus processing during init
+ * o Bug fix: Not setting/clearing I (Interrupt) bit in tcb correctly
+ * o Bug fix: Not using EEPROM WoL setting as default in ethtool
+ * o Bug fix: Not able to set autoneg on using ethtool when interface down
+ * o Bug fix: Not able to change speed/duplex using ethtool/mii
+ * when interface up
+ * o Bug fix: Ethtool shows autoneg on when forced to 100/Full
+ * o Bug fix: Compiler error when CONFIG_PROC_FS not defined
+ * o Bug fix: 2.5.44 e100 doesn't load with preemptive kernel enabled
+ * (sleep while holding spinlock)
+ * o Bug fix: 2.1.24-k1 doesn't display complete statistics
+ * o Bug fix: System panic due to NULL watchdog timer dereference during
+ * ifconfig down, rmmod and insmod
+ *
+ * 2.1.24 10/7/02
+ */
+
+#include <linux/config.h>
+/*#include <net/checksum.h>*/
+/*#include <linux/tcp.h>*/
+/*#include <linux/udp.h>*/
+#include "e100.h"
+#include "e100_ucode.h"
+#include "e100_config.h"
+#include "e100_phy.h"
+
+extern void e100_force_speed_duplex_to_phy(struct e100_private *bdp);
+
+#if 0
+static char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
+ "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
+ "rx_length_errors", "rx_over_errors", "rx_crc_errors",
+ "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
+ "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
+ "tx_heartbeat_errors", "tx_window_errors",
+};
+#define E100_STATS_LEN sizeof(e100_gstrings_stats) / ETH_GSTRING_LEN
+#endif
+
+#if 0
+static int e100_do_ethtool_ioctl(struct net_device *, struct ifreq *);
+#endif
+static void e100_get_speed_duplex_caps(struct e100_private *);
+#if 0
+static int e100_ethtool_get_settings(struct net_device *, struct ifreq *);
+static int e100_ethtool_set_settings(struct net_device *, struct ifreq *);
+
+static int e100_ethtool_get_drvinfo(struct net_device *, struct ifreq *);
+static int e100_ethtool_eeprom(struct net_device *, struct ifreq *);
+#endif
+
+#define E100_EEPROM_MAGIC 0x1234
+#if 0
+static int e100_ethtool_glink(struct net_device *, struct ifreq *);
+static int e100_ethtool_gregs(struct net_device *, struct ifreq *);
+static int e100_ethtool_nway_rst(struct net_device *, struct ifreq *);
+static int e100_ethtool_wol(struct net_device *, struct ifreq *);
+#endif
+#ifdef CONFIG_PM
+static unsigned char e100_setup_filter(struct e100_private *bdp);
+static void e100_do_wol(struct pci_dev *pcid, struct e100_private *bdp);
+#endif
+#if 0
+static u16 e100_get_ip_lbytes(struct net_device *dev);
+#endif
+extern void e100_config_wol(struct e100_private *bdp);
+extern u32 e100_run_diag(struct net_device *dev, u64 *test_info, u32 flags);
+#if 0
+static int e100_ethtool_test(struct net_device *, struct ifreq *);
+static int e100_ethtool_gstrings(struct net_device *, struct ifreq *);
+static char *test_strings[] = {
+ "E100_EEPROM_TEST_FAIL",
+ "E100_CHIP_TIMEOUT",
+ "E100_ROM_TEST_FAIL",
+ "E100_REG_TEST_FAIL",
+ "E100_MAC_TEST_FAIL",
+ "E100_LPBK_MAC_FAIL",
+ "E100_LPBK_PHY_FAIL"
+};
+#endif
+
+static int e100_ethtool_led_blink(struct net_device *, struct ifreq *);
+
+#if 0
+static int e100_mii_ioctl(struct net_device *, struct ifreq *, int);
+#endif
+
+static unsigned char e100_delayed_exec_non_cu_cmd(struct e100_private *,
+ nxmit_cb_entry_t *);
+static void e100_free_nontx_list(struct e100_private *);
+static void e100_non_tx_background(unsigned long);
+
+/* Global Data structures and variables */
+char e100_copyright[] __devinitdata = "Copyright (c) 2003 Intel Corporation";
+char e100_driver_version[]="2.2.21-k1";
+const char *e100_full_driver_name = "Intel(R) PRO/100 Network Driver";
+char e100_short_driver_name[] = "e100";
+static int e100nics = 0; /* number of NICs this driver has claimed so far */
+static void e100_vlan_rx_register(struct net_device *netdev, struct vlan_group
+ *grp);
+static void e100_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+static void e100_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+
+#ifdef CONFIG_PM
+static int e100_notify_reboot(struct notifier_block *, unsigned long event, void *ptr);
+static int e100_suspend(struct pci_dev *pcid, u32 state);
+static int e100_resume(struct pci_dev *pcid);
+struct notifier_block e100_notifier_reboot = { /* reboot hook for power management */
+ .notifier_call = e100_notify_reboot,
+ .next = NULL,
+ .priority = 0
+};
+#endif
+
+/*********************************************************************/
+/*! This is a GCC extension to ANSI C.
+ * See the item "Labeled Elements in Initializers" in the section
+ * "Extensions to the C Language Family" of the GCC documentation.
+ *********************************************************************/
+#define E100_PARAM_INIT { [0 ... E100_MAX_NIC] = -1 } /* -1 = "not set by user" */
+
+/* All parameters are treated the same, as an integer array of values
+ * (one entry per possible NIC).
+ * This macro just reduces the need to repeat the same declaration code
+ * over and over (plus this helps to avoid typo bugs).
+ */
+#define E100_PARAM(X, S) \
+ static const int X[E100_MAX_NIC + 1] = E100_PARAM_INIT; \
+ MODULE_PARM(X, "1-" __MODULE_STRING(E100_MAX_NIC) "i"); \
+ MODULE_PARM_DESC(X, S);
+
+/* ====================================================================== */
+static u8 e100_D101M_checksum(struct e100_private *, struct sk_buff *);
+static u8 e100_D102_check_checksum(rfd_t *);
+#if 0
+static int e100_ioctl(struct net_device *, struct ifreq *, int);
+#endif
+static int e100_open(struct net_device *);
+static int e100_close(struct net_device *);
+static int e100_change_mtu(struct net_device *, int);
+static int e100_xmit_frame(struct sk_buff *, struct net_device *);
+static unsigned char e100_init(struct e100_private *);
+static int e100_set_mac(struct net_device *, void *);
+struct net_device_stats *e100_get_stats(struct net_device *);
+
+static void e100intr(int, void *, struct pt_regs *);
+static void e100_print_brd_conf(struct e100_private *);
+static void e100_set_multi(struct net_device *);
+void e100_set_speed_duplex(struct e100_private *);
+
+static u8 e100_pci_setup(struct pci_dev *, struct e100_private *);
+static u8 e100_sw_init(struct e100_private *);
+static void e100_tco_workaround(struct e100_private *);
+static unsigned char e100_alloc_space(struct e100_private *);
+static void e100_dealloc_space(struct e100_private *);
+static int e100_alloc_tcb_pool(struct e100_private *);
+static void e100_setup_tcb_pool(tcb_t *, unsigned int, struct e100_private *);
+static void e100_free_tcb_pool(struct e100_private *);
+static int e100_alloc_rfd_pool(struct e100_private *);
+static void e100_free_rfd_pool(struct e100_private *);
+
+static void e100_rd_eaddr(struct e100_private *);
+static void e100_rd_pwa_no(struct e100_private *);
+extern u16 e100_eeprom_read(struct e100_private *, u16);
+extern void e100_eeprom_write_block(struct e100_private *, u16, u16 *, u16);
+extern u16 e100_eeprom_size(struct e100_private *);
+u16 e100_eeprom_calculate_chksum(struct e100_private *adapter);
+
+static unsigned char e100_clr_cntrs(struct e100_private *);
+static unsigned char e100_load_microcode(struct e100_private *);
+static unsigned char e100_hw_init(struct e100_private *);
+static unsigned char e100_setup_iaaddr(struct e100_private *, u8 *);
+static unsigned char e100_update_stats(struct e100_private *bdp);
+
+static void e100_start_ru(struct e100_private *);
+static void e100_dump_stats_cntrs(struct e100_private *);
+
+static void e100_check_options(int board, struct e100_private *bdp);
+static void e100_set_int_option(int *, int, int, int, int, char *);
+static void e100_set_bool_option(struct e100_private *bdp, int, u32, int,
+ char *);
+unsigned char e100_wait_exec_cmplx(struct e100_private *, u32, u8, u8);
+void e100_exec_cmplx(struct e100_private *, u32, u8);
+static unsigned char e100_asf_enabled(struct e100_private *bdp);
+
+/**
+ * e100_get_rx_struct - retrieve cell to hold skb buff from the pool
+ * @bdp: adapter's private data struct
+ *
+ * Returns the new cell to hold sk_buff or %NULL if the pool is empty.
+ */
+static inline struct rx_list_elem *
+e100_get_rx_struct(struct e100_private *bdp)
+{
+ struct rx_list_elem *rx_struct = NULL;
+
+ if (!list_empty(&(bdp->rx_struct_pool))) {
+ rx_struct = list_entry(bdp->rx_struct_pool.next,
+ struct rx_list_elem, list_elem);
+ list_del(&(rx_struct->list_elem)); // remove the cell from the free pool
+ }
+
+ return rx_struct;
+}
+
+/**
+ * e100_alloc_skb - allocate an skb for the adapter
+ * @bdp: adapter's private data struct
+ *
+ * Allocates skb with enough room for rfd, and data, and reserve non-data space.
+ * Returns the new cell with sk_buff or %NULL.
+ */
+static inline struct rx_list_elem *
+e100_alloc_skb(struct e100_private *bdp)
+{
+ struct sk_buff *new_skb;
+ u32 skb_size = sizeof (rfd_t);
+ struct rx_list_elem *rx_struct;
+
+ new_skb = (struct sk_buff *) dev_alloc_skb(skb_size);
+ if (new_skb) {
+ /* The IP data should be
+ DWORD aligned. since the ethernet header is 14 bytes long,
+ we need to reserve 2 extra bytes so that the TCP/IP headers
+ will be DWORD aligned. */
+ skb_reserve(new_skb, 2);
+ if ((rx_struct = e100_get_rx_struct(bdp)) == NULL)
+ goto err;
+ rx_struct->skb = new_skb;
+ rx_struct->dma_addr = pci_map_single(bdp->pdev, new_skb->data,
+ sizeof (rfd_t),
+ PCI_DMA_FROMDEVICE);
+ if (!rx_struct->dma_addr) /* treats bus address 0 as a mapping failure */
+ goto err; /* NOTE(review): rx_struct is not returned to rx_struct_pool here - looks like a leak; confirm */
+ skb_reserve(new_skb, bdp->rfd_size); // data starts after the in-skb RFD header
+ return rx_struct;
+ } else {
+ return NULL;
+ }
+
+err:
+ dev_kfree_skb_irq(new_skb);
+ return NULL;
+}
+
+/**
+ * e100_add_skb_to_end - add an skb to the end of our rfd list
+ * @bdp: adapter's private data struct
+ * @rx_struct: rx_list_elem with the new skb
+ *
+ * Adds a newly allocated skb to the end of our rfd list.
+ */
+inline void
+e100_add_skb_to_end(struct e100_private *bdp, struct rx_list_elem *rx_struct)
+{
+ rfd_t *rfdn; /* The new rfd */
+ rfd_t *rfd; /* The old rfd */
+ struct rx_list_elem *rx_struct_last;
+
+ (rx_struct->skb)->dev = bdp->device;
+ rfdn = RFD_POINTER(rx_struct->skb, bdp);
+ rfdn->rfd_header.cb_status = 0;
+ rfdn->rfd_header.cb_cmd = __constant_cpu_to_le16(RFD_EL_BIT); // new RFD becomes the list tail
+ rfdn->rfd_act_cnt = 0;
+ rfdn->rfd_sz = __constant_cpu_to_le16(RFD_DATA_SIZE);
+
+ pci_dma_sync_single(bdp->pdev, rx_struct->dma_addr, bdp->rfd_size,
+ PCI_DMA_TODEVICE);
+
+ if (!list_empty(&(bdp->active_rx_list))) {
+ rx_struct_last = list_entry(bdp->active_rx_list.prev,
+ struct rx_list_elem, list_elem);
+ rfd = RFD_POINTER(rx_struct_last->skb, bdp);
+ pci_dma_sync_single(bdp->pdev, rx_struct_last->dma_addr,
+ 4, PCI_DMA_FROMDEVICE);
+ // link the previous tail RFD to the new one
+ put_unaligned(cpu_to_le32(rx_struct->dma_addr),
+ ((u32 *) (&(rfd->rfd_header.cb_lnk_ptr))));
+
+ pci_dma_sync_single(bdp->pdev, rx_struct_last->dma_addr,
+ 8, PCI_DMA_TODEVICE);
+ rfd->rfd_header.cb_cmd &=
+ __constant_cpu_to_le16((u16) ~RFD_EL_BIT); // previous tail is no longer last
+
+ pci_dma_sync_single(bdp->pdev, rx_struct_last->dma_addr,
+ 4, PCI_DMA_TODEVICE);
+ }
+
+ list_add_tail(&(rx_struct->list_elem), &(bdp->active_rx_list));
+}
+
+static inline void
+e100_alloc_skbs(struct e100_private *bdp)
+{
+ /* Replenish receive buffers until the request count is exhausted
+ * or an allocation fails. */
+ for (; bdp->skb_req > 0; bdp->skb_req--) {
+ struct rx_list_elem *rx_struct;
+
+ if ((rx_struct = e100_alloc_skb(bdp)) == NULL)
+ return;
+
+ e100_add_skb_to_end(bdp, rx_struct);
+ }
+}
+
+void e100_tx_srv(struct e100_private *);
+u32 e100_rx_srv(struct e100_private *);
+
+void e100_watchdog(struct net_device *);
+void e100_refresh_txthld(struct e100_private *);
+void e100_manage_adaptive_ifs(struct e100_private *);
+void e100_clear_pools(struct e100_private *);
+static void e100_clear_structs(struct net_device *);
+static inline tcb_t *e100_prepare_xmit_buff(struct e100_private *,
+ struct sk_buff *);
+static void e100_set_multi_exec(struct net_device *dev);
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) PRO/100 Network Driver");
+MODULE_LICENSE("GPL");
+
+/* Per-NIC module parameter arrays; -1 (see E100_PARAM_INIT) = not set */
+E100_PARAM(TxDescriptors, "Number of transmit descriptors");
+E100_PARAM(RxDescriptors, "Number of receive descriptors");
+E100_PARAM(XsumRX, "Disable or enable Receive Checksum offload");
+E100_PARAM(e100_speed_duplex, "Speed and Duplex settings");
+E100_PARAM(ucode, "Disable or enable microcode loading");
+E100_PARAM(ber, "Value for the BER correction algorithm");
+E100_PARAM(flow_control, "Disable or enable Ethernet PAUSE frames processing");
+E100_PARAM(IntDelay, "Value for CPU saver's interrupt delay");
+E100_PARAM(BundleSmallFr, "Disable or enable interrupt bundling of small frames");
+E100_PARAM(BundleMax, "Maximum number for CPU saver's packet bundling");
+E100_PARAM(IFS, "Disable or enable the adaptive IFS algorithm");
+
+/**
+ * e100_exec_cmd - issue a command
+ * @bdp: adapter's private data struct
+ * @cmd_low: the command that is to be issued
+ *
+ * This general routine will issue a command to the e100.
+ */
+static inline void
+e100_exec_cmd(struct e100_private *bdp, u8 cmd_low)
+{
+ writeb(cmd_low, &(bdp->scb->scb_cmd_low));
+ readw(&(bdp->scb->scb_status)); /* flushes last write, read-safe */
+}
+
+/**
+ * e100_wait_scb - wait for SCB to clear
+ * @bdp: adapter's private data struct
+ *
+ * This routine checks to see if the e100 has accepted a command.
+ * It does so by checking the command field in the SCB, which will
+ * be zeroed by the e100 upon accepting a command. The loop waits
+ * for up to 1 millisecond for command acceptance.
+ *
+ * Returns:
+ * true if the SCB cleared within 1 millisecond.
+ * false if it didn't clear within 1 millisecond
+ */
+unsigned char
+e100_wait_scb(struct e100_private *bdp)
+{
+ int i;
+
+ /* loop on the scb for a few times */
+ for (i = 0; i < 100; i++) {
+ if (!readb(&bdp->scb->scb_cmd_low))
+ return true;
+ cpu_relax();
+ }
+
+ /* it didn't work. do it the slow way using udelay()s */
+ for (i = 0; i < E100_MAX_SCB_WAIT; i++) {
+ if (!readb(&bdp->scb->scb_cmd_low))
+ return true;
+ cpu_relax();
+ udelay(1);
+ }
+
+ return false;
+}
+
+/**
+ * e100_wait_exec_simple - issue a command
+ * @bdp: adapter's private data struct
+ * @scb_cmd_low: the command that is to be issued
+ *
+ * This general routine will issue a command to the e100 after waiting for
+ * the previous command to finish.
+ *
+ * Returns:
+ * true if the command was issued to the chip successfully
+ * false if the command was not issued to the chip
+ */
+inline unsigned char
+e100_wait_exec_simple(struct e100_private *bdp, u8 scb_cmd_low)
+{
+ if (!e100_wait_scb(bdp)) { /* previous command never completed */
+ printk(KERN_DEBUG "e100: %s: e100_wait_exec_simple: failed\n",
+ bdp->device->name);
+#ifdef E100_CU_DEBUG
+ printk(KERN_ERR "e100: %s: Last command (%x/%x) "
+ "timeout\n", bdp->device->name,
+ bdp->last_cmd, bdp->last_sub_cmd);
+ printk(KERN_ERR "e100: %s: Current simple command (%x) "
+ "can't be executed\n",
+ bdp->device->name, scb_cmd_low);
+#endif
+ return false;
+ }
+ e100_exec_cmd(bdp, scb_cmd_low);
+#ifdef E100_CU_DEBUG
+ bdp->last_cmd = scb_cmd_low;
+ bdp->last_sub_cmd = 0;
+#endif
+ return true;
+}
+
+/* Issue a complex (pointer-taking) command: load the argument's physical
+ * address into the SCB general pointer, then issue the command. */
+void
+e100_exec_cmplx(struct e100_private *bdp, u32 phys_addr, u8 cmd)
+{
+ writel(phys_addr, &(bdp->scb->scb_gen_ptr)); /* argument must be loaded before the command */
+ readw(&(bdp->scb->scb_status)); /* flushes last write, read-safe */
+ e100_exec_cmd(bdp, cmd);
+}
+
+/* Like e100_exec_cmplx(), but first waits for the previous command to be
+ * accepted. Returns true if the command was issued, false on timeout. */
+unsigned char
+e100_wait_exec_cmplx(struct e100_private *bdp, u32 phys_addr, u8 cmd, u8 sub_cmd)
+{
+ if (!e100_wait_scb(bdp)) { /* previous command never completed */
+#ifdef E100_CU_DEBUG
+ printk(KERN_ERR "e100: %s: Last command (%x/%x) "
+ "timeout\n", bdp->device->name,
+ bdp->last_cmd, bdp->last_sub_cmd);
+ printk(KERN_ERR "e100: %s: Current complex command "
+ "(%x/%x) can't be executed\n",
+ bdp->device->name, cmd, sub_cmd);
+#endif
+ return false;
+ }
+ e100_exec_cmplx(bdp, phys_addr, cmd);
+#ifdef E100_CU_DEBUG
+ bdp->last_cmd = cmd;
+ bdp->last_sub_cmd = sub_cmd;
+#endif
+ return true;
+}
+
+/* Poll until the Command Unit leaves the ACTIVE state.
+ * Returns true if it did, false if it stayed active for the whole wait. */
+inline u8
+e100_wait_cus_idle(struct e100_private *bdp)
+{
+ int i;
+
+ /* loop on the scb for a few times (fast poll, no delay) */
+ for (i = 0; i < 100; i++) {
+ if (((readw(&(bdp->scb->scb_status)) & SCB_CUS_MASK) !=
+ SCB_CUS_ACTIVE)) {
+ return true;
+ }
+ cpu_relax();
+ }
+
+ /* slow poll with 1us delays */
+ for (i = 0; i < E100_MAX_CU_IDLE_WAIT; i++) {
+ if (((readw(&(bdp->scb->scb_status)) & SCB_CUS_MASK) !=
+ SCB_CUS_ACTIVE)) {
+ return true;
+ }
+ cpu_relax();
+ udelay(1);
+ }
+
+ return false;
+}
+
+/**
+ * e100_disable_clear_intr - disable and clear/ack interrupts
+ * @bdp: adapter's private data struct
+ *
+ * This routine disables interrupts at the hardware, by setting
+ * the M (mask) bit in the adapter's CSR SCB command word.
+ * It also clear/ack interrupts.
+ */
+static inline void
+e100_disable_clear_intr(struct e100_private *bdp)
+{
+ u16 intr_status;
+ /* Disable interrupts on our PCI board by setting the mask bit */
+ writeb(SCB_INT_MASK, &bdp->scb->scb_cmd_hi);
+ intr_status = readw(&bdp->scb->scb_status);
+ /* ack and clear intrs by writing the status bits back */
+ writew(intr_status, &bdp->scb->scb_status);
+ readw(&bdp->scb->scb_status); /* flush the write */
+}
+
+/**
+ * e100_set_intr_mask - set interrupts
+ * @bdp: adapter's private data struct
+ *
+ * This routine restores the adapter's interrupt state by writing the
+ * saved intr_mask value to the CSR SCB command word (clearing the M
+ * mask bit when intr_mask allows interrupts).
+ */
+static inline void
+e100_set_intr_mask(struct e100_private *bdp)
+{
+ writeb(bdp->intr_mask, &bdp->scb->scb_cmd_hi);
+ readw(&(bdp->scb->scb_status)); /* flushes last write, read-safe */
+}
+
+/* Trigger a software-generated interrupt from the adapter. */
+static inline void
+e100_trigger_SWI(struct e100_private *bdp)
+{
+ /* Trigger interrupt on our PCI board by asserting the SWI bit */
+ writeb(SCB_SOFT_INT, &bdp->scb->scb_cmd_hi);
+ readw(&(bdp->scb->scb_status)); /* flushes last write, read-safe */
+}
+
+/**
+ * e100_found1 - PCI probe routine for one detected adapter
+ * @pcid: the PCI device being probed
+ * @ent: the matching entry in e100_id_table
+ *
+ * Allocates the net_device and driver-private data, maps PCI
+ * resources, initializes and self-tests the controller, validates
+ * the EEPROM checksum and registers the network interface.
+ *
+ * Returns 0 on success or a negative errno on failure.
+ */
+static int __devinit
+e100_found1(struct pci_dev *pcid, const struct pci_device_id *ent)
+{
+ static int first_time = true;
+ struct net_device *dev = NULL;
+ struct e100_private *bdp = NULL;
+ int rc = 0;
+ u16 cal_checksum, read_checksum;
+
+ dev = alloc_etherdev(sizeof (struct e100_private));
+ if (dev == NULL) {
+ printk(KERN_ERR "e100: Not able to alloc etherdev struct\n");
+ rc = -ENODEV;
+ goto out;
+ }
+
+ SET_MODULE_OWNER(dev);
+
+ /* Print the driver banner exactly once, on the first probe */
+ if (first_time) {
+ first_time = false;
+ printk(KERN_NOTICE "%s - version %s\n",
+ e100_full_driver_name, e100_driver_version);
+ printk(KERN_NOTICE "%s\n", e100_copyright);
+ printk(KERN_NOTICE "\n");
+ }
+
+ bdp = dev->priv;
+ bdp->pdev = pcid;
+ bdp->device = dev;
+
+ pci_set_drvdata(pcid, dev);
+
+ if ((rc = e100_alloc_space(bdp)) != 0) {
+ goto err_dev;
+ }
+
+ bdp->flags = 0;
+ bdp->ifs_state = 0;
+ bdp->ifs_value = 0;
+ bdp->scb = 0;
+
+ /* timer for deferred (non-transmit) CU commands */
+ init_timer(&bdp->nontx_timer_id);
+ bdp->nontx_timer_id.data = (unsigned long) bdp;
+ bdp->nontx_timer_id.function = (void *) &e100_non_tx_background;
+ INIT_LIST_HEAD(&(bdp->non_tx_cmd_list));
+ bdp->non_tx_command_state = E100_NON_TX_IDLE;
+
+ /* 2-second periodic link/statistics watchdog (armed in e100_open) */
+ init_timer(&bdp->watchdog_timer);
+ bdp->watchdog_timer.data = (unsigned long) dev;
+ bdp->watchdog_timer.function = (void *) &e100_watchdog;
+
+ if ((rc = e100_pci_setup(pcid, bdp)) != 0) {
+ goto err_dealloc;
+ }
+
+ /* Force D101MA behaviour on ICH-family device IDs */
+ if (((bdp->pdev->device > 0x1030)
+ && (bdp->pdev->device < 0x103F))
+ || ((bdp->pdev->device >= 0x1050)
+ && (bdp->pdev->device <= 0x1057))
+ || (bdp->pdev->device == 0x2449)
+ || (bdp->pdev->device == 0x2459)
+ || (bdp->pdev->device == 0x245D)) {
+ bdp->rev_id = D101MA_REV_ID; /* workaround for ICH3 */
+ bdp->flags |= IS_ICH;
+ }
+
+ /* 0xff means the revision could not be read; presumably fall back
+ * to the oldest revision — TODO confirm against upstream e100 */
+ if (bdp->rev_id == 0xff)
+ bdp->rev_id = 1;
+
+ if ((u8) bdp->rev_id >= D101A4_REV_ID)
+ bdp->flags |= IS_BACHELOR;
+
+ if ((u8) bdp->rev_id >= D102_REV_ID) {
+ bdp->flags |= USE_IPCB;
+ bdp->rfd_size = 32;
+ } else {
+ bdp->rfd_size = 16;
+ }
+ e100_check_options(e100nics, bdp);
+
+ if (!e100_init(bdp)) {
+ printk(KERN_ERR "e100: Failed to initialize, instance #%d\n",
+ e100nics);
+ rc = -ENODEV;
+ goto err_pci;
+ }
+
+ /* Check if checksum is valid */
+ cal_checksum = e100_eeprom_calculate_chksum(bdp);
+ read_checksum = e100_eeprom_read(bdp, (bdp->eeprom_size - 1));
+ if (cal_checksum != read_checksum) {
+ printk(KERN_ERR "e100: Corrupted EEPROM on instance #%d\n",
+ e100nics);
+ rc = -ENODEV;
+ goto err_pci;
+ }
+
+ dev->vlan_rx_register = e100_vlan_rx_register;
+ dev->vlan_rx_add_vid = e100_vlan_rx_add_vid;
+ dev->vlan_rx_kill_vid = e100_vlan_rx_kill_vid;
+ dev->irq = pcid->irq;
+ dev->open = &e100_open;
+ dev->hard_start_xmit = &e100_xmit_frame;
+ dev->stop = &e100_close;
+ dev->change_mtu = &e100_change_mtu;
+ dev->get_stats = &e100_get_stats;
+ dev->set_multicast_list = &e100_set_multi;
+ dev->set_mac_address = &e100_set_mac;
+#if 0
+ dev->do_ioctl = &e100_ioctl;
+#endif
+
+ /* IPCB-capable (D102+) parts can offload checksum/SG/VLAN tagging */
+ if (bdp->flags & USE_IPCB)
+ dev->features = NETIF_F_SG | NETIF_F_HW_CSUM |
+ NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+
+ /* NOTE(review): counted before register_netdev() and never rolled
+ * back on registration failure — confirm this is intended */
+ e100nics++;
+
+ e100_get_speed_duplex_caps(bdp);
+
+ if ((rc = register_netdev(dev)) != 0) {
+ goto err_pci;
+ }
+ memcpy(bdp->ifname, dev->name, IFNAMSIZ);
+ bdp->ifname[IFNAMSIZ-1] = 0;
+
+ printk(KERN_NOTICE
+ "e100: %s: %s\n",
+ bdp->device->name, "Intel(R) PRO/100 Network Connection");
+ e100_print_brd_conf(bdp);
+
+ bdp->wolsupported = 0;
+ bdp->wolopts = 0;
+
+ /* Check if WoL is enabled on EEPROM */
+ if (e100_eeprom_read(bdp, EEPROM_ID_WORD) & BIT_5) {
+ /* Magic Packet WoL is enabled on device by default */
+ /* if EEPROM WoL bit is TRUE */
+ bdp->wolsupported = WAKE_MAGIC;
+ bdp->wolopts = WAKE_MAGIC;
+ if (bdp->rev_id >= D101A4_REV_ID)
+ bdp->wolsupported = WAKE_PHY | WAKE_MAGIC;
+ if (bdp->rev_id >= D101MA_REV_ID)
+ bdp->wolsupported |= WAKE_UCAST | WAKE_ARP;
+ }
+
+ printk(KERN_NOTICE "\n");
+
+ if ( !(dev->features & NETIF_F_SG) )
+ alert_slow_netdevice(dev, (char *)e100_full_driver_name);
+
+ goto out;
+
+err_pci:
+ iounmap(bdp->scb);
+ pci_release_regions(pcid);
+ pci_disable_device(pcid);
+err_dealloc:
+ e100_dealloc_space(bdp);
+err_dev:
+ pci_set_drvdata(pcid, NULL);
+ /* presumably kfree() matches alloc_etherdev() in this kernel
+ * generation (2.4-era) — verify if ever updated */
+ kfree(dev);
+out:
+ return rc;
+}
+
+/**
+ * e100_clear_structs - free resources
+ * @dev: adapter's net_device struct
+ *
+ * Free all device specific structs, unmap i/o address, etc.
+ * Mirrors (in reverse) the allocations made by e100_found1().
+ */
+static void __devexit
+e100_clear_structs(struct net_device *dev)
+{
+ struct e100_private *bdp = dev->priv;
+
+ iounmap(bdp->scb);
+ pci_release_regions(bdp->pdev);
+ pci_disable_device(bdp->pdev);
+
+ e100_dealloc_space(bdp);
+ pci_set_drvdata(bdp->pdev, NULL);
+ kfree(dev);
+}
+
+/*
+ * e100_remove1 - PCI remove routine; tear down one adapter
+ * @pcid: the PCI device being removed
+ *
+ * Unregisters the interface, resets the hardware, flushes any pending
+ * deferred (non-tx) commands and frees all driver resources.
+ */
+static void __devexit
+e100_remove1(struct pci_dev *pcid)
+{
+ struct net_device *dev;
+ struct e100_private *bdp;
+
+ if (!(dev = (struct net_device *) pci_get_drvdata(pcid)))
+ return;
+
+ bdp = dev->priv;
+
+ unregister_netdev(dev);
+
+ e100_sw_reset(bdp, PORT_SELECTIVE_RESET);
+
+ /* cancel and drain any deferred non-tx command still in flight */
+ if (bdp->non_tx_command_state != E100_NON_TX_IDLE) {
+ del_timer_sync(&bdp->nontx_timer_id);
+ e100_free_nontx_list(bdp);
+ bdp->non_tx_command_state = E100_NON_TX_IDLE;
+ }
+
+ e100_clear_structs(dev);
+
+ --e100nics;
+}
+
+/* PCI vendor/device IDs claimed by this driver (vendor 0x8086 = Intel) */
+static struct pci_device_id e100_id_table[] __devinitdata = {
+ {0x8086, 0x1229, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ {0x8086, 0x2449, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ {0x8086, 0x1059, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ {0x8086, 0x1209, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ {0x8086, 0x1029, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ {0x8086, 0x1030, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ {0x8086, 0x1031, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ {0x8086, 0x1032, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ {0x8086, 0x1033, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ {0x8086, 0x1034, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ {0x8086, 0x1038, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ {0x8086, 0x1039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ {0x8086, 0x103A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ {0x8086, 0x103B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ {0x8086, 0x103C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ {0x8086, 0x103D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ {0x8086, 0x103E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ {0x8086, 0x1050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ {0x8086, 0x1051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ {0x8086, 0x1052, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ {0x8086, 0x1053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ {0x8086, 0x1054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ {0x8086, 0x1055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ {0x8086, 0x2459, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ {0x8086, 0x245D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ {0,} /* This has to be the last entry*/
+};
+MODULE_DEVICE_TABLE(pci, e100_id_table);
+
+/* PCI driver glue: probe/remove (and PM hooks when CONFIG_PM) */
+static struct pci_driver e100_driver = {
+ .name = "e100",
+ .id_table = e100_id_table,
+ .probe = e100_found1,
+ .remove = __devexit_p(e100_remove1),
+#ifdef CONFIG_PM
+ .suspend = e100_suspend,
+ .resume = e100_resume,
+#endif
+};
+
+/*
+ * e100_init_module - module entry point
+ *
+ * Registers the PCI driver; on success also hooks the reboot notifier
+ * (PM builds only) so adapters can be quiesced at shutdown.
+ */
+static int __init
+e100_init_module(void)
+{
+ int ret;
+ ret = pci_module_init(&e100_driver);
+
+ /* pci_module_init() returns a negative errno on failure */
+ if(ret >= 0) {
+#ifdef CONFIG_PM
+ register_reboot_notifier(&e100_notifier_reboot);
+#endif
+ }
+
+ return ret;
+}
+
+/*
+ * e100_cleanup_module - module exit point
+ *
+ * Unhooks the reboot notifier (PM builds) and unregisters the PCI
+ * driver, which in turn invokes e100_remove1() for each adapter.
+ */
+static void __exit
+e100_cleanup_module(void)
+{
+#ifdef CONFIG_PM
+ unregister_reboot_notifier(&e100_notifier_reboot);
+#endif
+
+ pci_unregister_driver(&e100_driver);
+}
+
+module_init(e100_init_module);
+module_exit(e100_cleanup_module);
+
+/**
+ * e100_check_options - check command line options
+ * @board: board number
+ * @bdp: adapter's private data struct
+ *
+ * This routine does range checking on command-line options
+ */
+void __devinit
+e100_check_options(int board, struct e100_private *bdp)
+{
+ if (board >= E100_MAX_NIC) {
+ printk(KERN_NOTICE
+ "e100: No configuration available for board #%d\n",
+ board);
+ printk(KERN_NOTICE "e100: Using defaults for all values\n");
+ /* assumes the option arrays have E100_MAX_NIC+1 entries so
+ * index E100_MAX_NIC holds defaults — TODO confirm sizing */
+ board = E100_MAX_NIC;
+ }
+
+ e100_set_int_option(&(bdp->params.TxDescriptors), TxDescriptors[board],
+ E100_MIN_TCB, E100_MAX_TCB, E100_DEFAULT_TCB,
+ "TxDescriptor count");
+
+ e100_set_int_option(&(bdp->params.RxDescriptors), RxDescriptors[board],
+ E100_MIN_RFD, E100_MAX_RFD, E100_DEFAULT_RFD,
+ "RxDescriptor count");
+
+ e100_set_int_option(&(bdp->params.e100_speed_duplex),
+ e100_speed_duplex[board], 0, 4,
+ E100_DEFAULT_SPEED_DUPLEX, "speed/duplex mode");
+
+ e100_set_int_option(&(bdp->params.ber), ber[board], 0, ZLOCK_MAX_ERRORS,
+ E100_DEFAULT_BER, "Bit Error Rate count");
+
+ e100_set_bool_option(bdp, XsumRX[board], PRM_XSUMRX, E100_DEFAULT_XSUM,
+ "XsumRX value");
+
+ /* Default ucode value depended on controller revision */
+ if (bdp->rev_id >= D101MA_REV_ID) {
+ e100_set_bool_option(bdp, ucode[board], PRM_UCODE,
+ E100_DEFAULT_UCODE, "ucode value");
+ } else {
+ e100_set_bool_option(bdp, ucode[board], PRM_UCODE, false,
+ "ucode value");
+ }
+
+ e100_set_bool_option(bdp, flow_control[board], PRM_FC, E100_DEFAULT_FC,
+ "flow control value");
+
+ e100_set_bool_option(bdp, IFS[board], PRM_IFS, E100_DEFAULT_IFS,
+ "IFS value");
+
+ e100_set_bool_option(bdp, BundleSmallFr[board], PRM_BUNDLE_SMALL,
+ E100_DEFAULT_BUNDLE_SMALL_FR,
+ "CPU saver bundle small frames value");
+
+ e100_set_int_option(&(bdp->params.IntDelay), IntDelay[board], 0x0,
+ 0xFFFF, E100_DEFAULT_CPUSAVER_INTERRUPT_DELAY,
+ "CPU saver interrupt delay value");
+
+ e100_set_int_option(&(bdp->params.BundleMax), BundleMax[board], 0x1,
+ 0xFFFF, E100_DEFAULT_CPUSAVER_BUNDLE_MAX,
+ "CPU saver bundle max value");
+
+}
+
+/**
+ * e100_set_int_option - check and set an integer option
+ * @option: a pointer to the relevant option field
+ * @val: the value specified
+ * @min: the minimum valid value
+ * @max: the maximum valid value
+ * @default_val: the default value
+ * @name: the name of the option
+ *
+ * This routine does range checking on a command-line option.
+ * If the option's value is '-1' use the specified default.
+ * Otherwise, if the value is invalid, change it to the default.
+ */
+void __devinit
+e100_set_int_option(int *option, int val, int min, int max, int default_val,
+ char *name)
+{
+ if (val == -1) { /* no value specified. use default */
+ *option = default_val;
+
+ } else if ((val < min) || (val > max)) {
+ printk(KERN_NOTICE
+ "e100: Invalid %s specified (%i). "
+ "Valid range is %i-%i\n",
+ name, val, min, max);
+ printk(KERN_NOTICE "e100: Using default %s of %i\n", name,
+ default_val);
+ *option = default_val;
+ } else {
+ printk(KERN_INFO "e100: Using specified %s of %i\n", name, val);
+ *option = val;
+ }
+}
+
+/**
+ * e100_set_bool_option - check and set a boolean option
+ * @bdp: adapter's private data struct
+ * @val: the value specified
+ * @mask: the mask for the relevant option
+ * @default_val: the default value
+ * @name: the name of the option
+ *
+ * This routine checks a boolean command-line option.
+ * If the option's value is '-1' use the specified default.
+ * Otherwise, if the value is invalid (not 0 or 1),
+ * change it to the default.
+ *
+ * Note: this only ever ORs @mask into b_params; it assumes b_params
+ * starts out with the bit clear (zeroed private struct).
+ */
+void __devinit
+e100_set_bool_option(struct e100_private *bdp, int val, u32 mask,
+ int default_val, char *name)
+{
+ if (val == -1) {
+ if (default_val)
+ bdp->params.b_params |= mask;
+
+ } else if ((val != true) && (val != false)) {
+ printk(KERN_NOTICE
+ "e100: Invalid %s specified (%i). "
+ "Valid values are %i/%i\n",
+ name, val, false, true);
+ printk(KERN_NOTICE "e100: Using default %s of %i\n", name,
+ default_val);
+
+ if (default_val)
+ bdp->params.b_params |= mask;
+ } else {
+ printk(KERN_INFO "e100: Using specified %s of %i\n", name, val);
+ if (val)
+ bdp->params.b_params |= mask;
+ }
+}
+
+/*
+ * e100_open - bring the interface up (dev->open)
+ * @dev: adapter's net_device struct
+ *
+ * Allocates the TCB and RFD pools, loads the CU/RU base registers,
+ * arms the watchdog, starts the receive unit and hooks the interrupt.
+ * Returns 0 on success or a negative errno on failure.
+ */
+static int
+e100_open(struct net_device *dev)
+{
+ struct e100_private *bdp;
+ int rc = 0;
+
+ bdp = dev->priv;
+
+ /* setup the tcb pool */
+ if (!e100_alloc_tcb_pool(bdp)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ bdp->last_tcb = NULL;
+
+ bdp->tcb_pool.head = 0;
+ bdp->tcb_pool.tail = 1;
+
+ e100_setup_tcb_pool((tcb_t *) bdp->tcb_pool.data,
+ bdp->params.TxDescriptors, bdp);
+
+ if (!e100_alloc_rfd_pool(bdp)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ /* base of 0 == linear addressing for the CU and RU */
+ if (!e100_wait_exec_cmplx(bdp, 0, SCB_CUC_LOAD_BASE, 0)) {
+ rc = -EAGAIN;
+ goto err_exit;
+ }
+
+ if (!e100_wait_exec_cmplx(bdp, 0, SCB_RUC_LOAD_BASE, 0)) {
+ rc = -EAGAIN;
+ goto err_exit;
+ }
+
+ mod_timer(&(bdp->watchdog_timer), jiffies + (2 * HZ));
+
+ netif_start_queue(dev);
+
+ /* NOTE(review): queue and RU are started before request_irq(); on
+ * request_irq() failure the queue is left started while the pools
+ * are freed — confirm teardown ordering is intended */
+ e100_start_ru(bdp);
+ if ((rc = request_irq(dev->irq, &e100intr, SA_SHIRQ,
+ dev->name, dev)) != 0) {
+ del_timer_sync(&bdp->watchdog_timer);
+ goto err_exit;
+ }
+ bdp->intr_mask = 0;
+ e100_set_intr_mask(bdp);
+
+ e100_force_config(bdp);
+
+ goto exit;
+
+err_exit:
+ e100_clear_pools(bdp);
+exit:
+ return rc;
+}
+
+/*
+ * e100_close - bring the interface down (dev->stop)
+ * @dev: adapter's net_device struct
+ *
+ * Masks interrupts, releases the IRQ, isolates the hardware from the
+ * driver and frees the TCB/RFD pools. Always returns 0.
+ */
+static int
+e100_close(struct net_device *dev)
+{
+ struct e100_private *bdp = dev->priv;
+
+ e100_disable_clear_intr(bdp);
+ free_irq(dev->irq, dev);
+ bdp->intr_mask = SCB_INT_MASK;
+ e100_isolate_driver(bdp);
+
+ netif_carrier_off(bdp->device);
+ bdp->cur_line_speed = 0;
+ bdp->cur_dplx_mode = 0;
+ e100_clear_pools(bdp);
+
+ return 0;
+}
+
+/*
+ * e100_change_mtu - validate and set a new MTU (dev->change_mtu)
+ * @dev: adapter's net_device struct
+ * @new_mtu: requested MTU
+ *
+ * 68 is the minimum IPv4 MTU; the upper bound leaves room for a
+ * VLAN tag on top of a standard Ethernet payload.
+ */
+static int
+e100_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if ((new_mtu < 68) || (new_mtu > (ETH_DATA_LEN + VLAN_SIZE)))
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+/*
+ * e100_xmit_frame - queue a frame for transmission (dev->hard_start_xmit)
+ * @skb: the packet to send
+ * @dev: adapter's net_device struct
+ *
+ * Returns 0 on success; returns 1 (and stops the queue) to ask the
+ * network core to requeue the skb when no TCB is available or a
+ * deferred non-tx command is in flight.
+ */
+static int
+e100_xmit_frame(struct sk_buff *skb, struct net_device *dev)
+{
+ int rc = 0;
+ int notify_stop = false;
+ struct e100_private *bdp = dev->priv;
+
+ /* trylock: contention with the non-tx command path just requeues */
+ if (!spin_trylock(&bdp->bd_non_tx_lock)) {
+ notify_stop = true;
+ rc = 1;
+ goto exit2;
+ }
+
+ if (!TCBS_AVAIL(bdp->tcb_pool) ||
+ (bdp->non_tx_command_state != E100_NON_TX_IDLE)) {
+ notify_stop = true;
+ rc = 1;
+ goto exit1;
+ }
+
+ e100_prepare_xmit_buff(bdp, skb);
+
+ bdp->drv_stats.net_stats.tx_bytes += skb->len;
+
+ dev->trans_start = jiffies;
+
+exit1:
+ spin_unlock(&bdp->bd_non_tx_lock);
+exit2:
+ if (notify_stop) {
+ netif_stop_queue(dev);
+ }
+
+ return rc;
+}
+
+/**
+ * e100_get_stats - get driver statistics
+ * @dev: adapter's net_device struct
+ *
+ * This routine is called when the OS wants the adapter's stats returned.
+ * It returns the address of the net_device_stats stucture for the device.
+ * If the statistics are currently being updated, then they might be incorrect
+ * for a short while. However, since this cannot actually cause damage, no
+ * locking is used.
+ */
+struct net_device_stats *
+e100_get_stats(struct net_device *dev)
+{
+ struct e100_private *bdp = dev->priv;
+
+ /* derive the aggregate error counters from their components */
+ bdp->drv_stats.net_stats.tx_errors =
+ bdp->drv_stats.net_stats.tx_carrier_errors +
+ bdp->drv_stats.net_stats.tx_aborted_errors;
+
+ bdp->drv_stats.net_stats.rx_errors =
+ bdp->drv_stats.net_stats.rx_crc_errors +
+ bdp->drv_stats.net_stats.rx_frame_errors +
+ bdp->drv_stats.net_stats.rx_length_errors +
+ bdp->drv_stats.rcv_cdt_frames;
+
+ return &(bdp->drv_stats.net_stats);
+}
+
+/**
+ * e100_set_mac - set the MAC address
+ * @dev: adapter's net_device struct
+ * @addr: the new address
+ *
+ * This routine sets the ethernet address of the board
+ * Returns:
+ * 0 - if successful
+ * -1 - otherwise (NOTE(review): a bare -1, not a -E errno)
+ */
+static int
+e100_set_mac(struct net_device *dev, void *addr)
+{
+ struct e100_private *bdp;
+ int rc = -1;
+ struct sockaddr *p_sockaddr = (struct sockaddr *) addr;
+
+ if (!is_valid_ether_addr(p_sockaddr->sa_data))
+ return -EADDRNOTAVAIL;
+ bdp = dev->priv;
+
+ /* only commit the new address once the hardware accepted it */
+ if (e100_setup_iaaddr(bdp, (u8 *) (p_sockaddr->sa_data))) {
+ memcpy(&(dev->dev_addr[0]), p_sockaddr->sa_data, ETH_ALEN);
+ rc = 0;
+ }
+
+ return rc;
+}
+
+/*
+ * e100_set_multi_exec - issue a multicast-setup command to the adapter
+ * @dev: adapter's net_device struct
+ *
+ * Builds a CB_MULTICAST command listing the device's multicast
+ * addresses and submits it through the non-tx command path.
+ */
+static void
+e100_set_multi_exec(struct net_device *dev)
+{
+ struct e100_private *bdp = dev->priv;
+ mltcst_cb_t *mcast_buff;
+ cb_header_t *cb_hdr;
+ struct dev_mc_list *mc_list;
+ unsigned int i;
+ nxmit_cb_entry_t *cmd = e100_alloc_non_tx_cmd(bdp);
+
+ if (cmd != NULL) {
+ mcast_buff = &((cmd->non_tx_cmd)->ntcb.multicast);
+ cb_hdr = &((cmd->non_tx_cmd)->ntcb.multicast.mc_cbhdr);
+ } else {
+ return;
+ }
+
+ /* initialize the multi cast command */
+ cb_hdr->cb_cmd = __constant_cpu_to_le16(CB_MULTICAST);
+
+ /* now fill in the rest of the multicast command */
+ /* byte count = addresses * 6 (ETH_ALEN). NOTE(review): the count
+ * uses mc_count while the copy loop caps at MAX_MULTICAST_ADDRS;
+ * presumably callers guarantee mc_count <= MAX (see e100_set_multi)
+ * — confirm */
+ *(u16 *) (&(mcast_buff->mc_count)) = cpu_to_le16(dev->mc_count * 6);
+ for (i = 0, mc_list = dev->mc_list;
+ (i < dev->mc_count) && (i < MAX_MULTICAST_ADDRS);
+ i++, mc_list = mc_list->next) {
+ /* copy into the command */
+ memcpy(&(mcast_buff->mc_addr[i * ETH_ALEN]),
+ (u8 *) &(mc_list->dmi_addr), ETH_ALEN);
+ }
+
+ if (!e100_exec_non_cu_cmd(bdp, cmd)) {
+ printk(KERN_WARNING "e100: %s: Multicast setup failed\n",
+ dev->name);
+ }
+}
+
+/**
+ * e100_set_multi - set multicast status
+ * @dev: adapter's net_device struct
+ *
+ * This routine is called to add or remove multicast addresses, and/or to
+ * change the adapter's promiscuous state.
+ */
+static void
+e100_set_multi(struct net_device *dev)
+{
+ struct e100_private *bdp = dev->priv;
+ unsigned char promisc_enbl;
+ unsigned char mulcast_enbl;
+
+ promisc_enbl = ((dev->flags & IFF_PROMISC) == IFF_PROMISC);
+ /* accept-all-multicast also when the list exceeds what a single
+ * multicast command can carry */
+ mulcast_enbl = ((dev->flags & IFF_ALLMULTI) ||
+ (dev->mc_count > MAX_MULTICAST_ADDRS));
+
+ e100_config_promisc(bdp, promisc_enbl);
+ e100_config_mulcast_enbl(bdp, mulcast_enbl);
+
+ /* reconfigure the chip if something has changed in its config space */
+ e100_config(bdp);
+
+ if (promisc_enbl || mulcast_enbl) {
+ return; /* no need for Multicast Cmd */
+ }
+
+ /* get the multicast CB */
+ e100_set_multi_exec(dev);
+}
+
+/* Private ioctl dispatch (ethtool and MII passthrough). Compiled out
+ * for this port — see the matching "#if 0" around dev->do_ioctl. */
+#if 0
+static int
+e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+
+ switch (cmd) {
+
+ case SIOCETHTOOL:
+ return e100_do_ethtool_ioctl(dev, ifr);
+ break;
+
+ case SIOCGMIIPHY: /* Get address of MII PHY in use. */
+ case SIOCGMIIREG: /* Read MII PHY register. */
+ case SIOCSMIIREG: /* Write to MII PHY register. */
+ return e100_mii_ioctl(dev, ifr, cmd);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+
+}
+#endif
+
+/**
+ * e100_init - initialize the adapter
+ * @bdp: adapter's private data struct
+ *
+ * This routine is called when this driver is loaded. This is the initialization
+ * routine which allocates memory, configures the adapter and determines the
+ * system resources.
+ *
+ * Returns:
+ * true: if successful
+ * false: otherwise
+ */
+static unsigned char __devinit
+e100_init(struct e100_private *bdp)
+{
+ u32 st_timeout = 0;
+ u32 st_result = 0;
+ e100_sw_init(bdp);
+
+ if (!e100_selftest(bdp, &st_timeout, &st_result)) {
+ if (st_timeout) {
+ printk(KERN_ERR "e100: selftest timeout\n");
+ } else {
+ printk(KERN_ERR "e100: selftest failed. Results: %x\n",
+ st_result);
+ }
+ return false;
+ }
+ else
+ printk(KERN_DEBUG "e100: selftest OK.\n");
+
+ /* read the MAC address from the eprom */
+ e100_rd_eaddr(bdp);
+ if (!is_valid_ether_addr(bdp->device->dev_addr)) {
+ printk(KERN_ERR "e100: Invalid Ethernet address\n");
+ return false;
+ }
+ /* read NIC's part number */
+ e100_rd_pwa_no(bdp);
+
+ if (!e100_hw_init(bdp)) {
+ printk(KERN_ERR "e100: hw init failed\n");
+ return false;
+ }
+ /* Interrupts are enabled after device reset */
+ e100_disable_clear_intr(bdp);
+
+ return true;
+}
+
+/**
+ * e100_sw_init - initialize software structs
+ * @bdp: adapter's private data struct
+ *
+ * This routine initializes all software structures. Sets up the
+ * circular structures for the RFD's & TCB's. Allocates the per board
+ * structure for storing adapter information. The CSR is also memory
+ * mapped in this routine.
+ *
+ * Returns :
+ * true: if S/W was successfully initialized
+ * false: otherwise (currently it can only succeed and returns 1)
+ */
+static unsigned char __devinit
+e100_sw_init(struct e100_private *bdp)
+{
+ bdp->next_cu_cmd = START_WAIT; // init the next cu state
+
+ /*
+ * Set the value for # of good xmits per underrun. the value assigned
+ * here is an intelligent suggested default. Nothing magical about it.
+ */
+ bdp->tx_per_underrun = DEFAULT_TX_PER_UNDERRUN;
+
+ /* get the default transmit threshold value */
+ bdp->tx_thld = TX_THRSHLD;
+
+ /* get the EPROM size */
+ bdp->eeprom_size = e100_eeprom_size(bdp);
+
+ /* Initialize our spinlocks */
+ spin_lock_init(&(bdp->bd_lock));
+ spin_lock_init(&(bdp->bd_non_tx_lock));
+ spin_lock_init(&(bdp->config_lock));
+ spin_lock_init(&(bdp->mdi_access_lock));
+
+ return 1;
+}
+
+/*
+ * e100_tco_workaround - hand the NIC over from pre-driver management
+ * @bdp: adapter's private data struct
+ *
+ * Resets the device and waits until any pending TCO (presumably the
+ * manageability/ASF traffic — confirm against the 8255x manual)
+ * request has drained, so the driver can safely take ownership.
+ */
+static void __devinit
+e100_tco_workaround(struct e100_private *bdp)
+{
+ int i;
+
+ /* Do software reset */
+ e100_sw_reset(bdp, PORT_SOFTWARE_RESET);
+
+ /* Do a dummy LOAD CU BASE command. */
+ /* This gets us out of pre-driver to post-driver. */
+ e100_exec_cmplx(bdp, 0, SCB_CUC_LOAD_BASE);
+
+ /* Wait 20 msec for reset to take effect */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ / 50 + 1);
+
+ /* disable interrupts since they are enabled */
+ /* after device reset */
+ e100_disable_clear_intr(bdp);
+
+ /* Wait for command to be cleared up to 1 sec */
+ for (i=0; i<100; i++) {
+ if (!readb(&bdp->scb->scb_cmd_low))
+ break;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ / 100 + 1);
+ }
+
+ /* Wait for TCO request bit in PMDR register to be clear */
+ for (i=0; i<50; i++) {
+ if (!(readb(&bdp->scb->scb_ext.d101m_scb.scb_pmdr) & BIT_1))
+ break;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ / 100 + 1);
+ }
+}
+
+/**
+ * e100_hw_init - initialize the hardware
+ * @bdp: adapter's private data struct
+ *
+ * This routine performs a reset on the adapter, and configures the adapter.
+ * This includes configuring the 82557 LAN controller, validating and setting
+ * the node address, detecting and configuring the Phy chip on the adapter,
+ * and initializing all of the on chip counters.
+ *
+ * Returns:
+ * true - If the adapter was initialized
+ * false - If the adapter failed initialization
+ */
+unsigned char __devinit
+e100_hw_init(struct e100_private *bdp)
+{
+ if (!e100_phy_init(bdp))
+ return false;
+
+ e100_sw_reset(bdp, PORT_SELECTIVE_RESET);
+
+ /* Only 82559 or above needs TCO workaround */
+ if (bdp->rev_id >= D101MA_REV_ID)
+ e100_tco_workaround(bdp);
+
+ /* Load the CU BASE (set to 0, because we use linear mode) */
+ if (!e100_wait_exec_cmplx(bdp, 0, SCB_CUC_LOAD_BASE, 0))
+ return false;
+
+ if (!e100_wait_exec_cmplx(bdp, 0, SCB_RUC_LOAD_BASE, 0))
+ return false;
+
+ /* Load interrupt microcode; failure is non-fatal, the flag just
+ * stays clear */
+ if (e100_load_microcode(bdp)) {
+ bdp->flags |= DF_UCODE_LOADED;
+ }
+
+ e100_config_init(bdp);
+ if (!e100_config(bdp)) {
+ return false;
+ }
+
+ if (!e100_setup_iaaddr(bdp, bdp->device->dev_addr))
+ return false;
+
+ /* Clear the internal counters */
+ if (!e100_clr_cntrs(bdp))
+ return false;
+
+ /* Change for 82558 enhancement */
+ /* If 82558/9 and if the user has enabled flow control, set up the
+ * Flow Control Reg. in the CSR */
+ if ((bdp->flags & IS_BACHELOR)
+ && (bdp->params.b_params & PRM_FC)) {
+ writeb(DFLT_FC_THLD, &bdp->scb->scb_ext.d101_scb.scb_fc_thld);
+ writeb(DFLT_FC_CMD,
+ &bdp->scb->scb_ext.d101_scb.scb_fc_xon_xoff);
+ }
+
+ return true;
+}
+
+/**
+ * e100_setup_tcb_pool - setup TCB circular list
+ * @head: Pointer to head of the allocated TCBs
+ * @qlen: Number of elements in the queue
+ * @bdp: adapter's private data struct
+ *
+ * This routine arranges the contigiously allocated TCB's in a circular list.
+ * Also does the one time initialization of the TCBs.
+ */
+static void
+e100_setup_tcb_pool(tcb_t *head, unsigned int qlen, struct e100_private *bdp)
+{
+ int ele_no;
+ tcb_t *pcurr_tcb; /* point to current tcb */
+ u32 next_phys; /* the next phys addr */
+ u16 txcommand = CB_S_BIT | CB_TX_SF_BIT;
+
+ bdp->tx_count = 0;
+ /* pick the transmit command flavour the controller supports */
+ if (bdp->flags & USE_IPCB) {
+ txcommand |= CB_IPCB_TRANSMIT | CB_CID_DEFAULT;
+ } else if (bdp->flags & IS_BACHELOR) {
+ txcommand |= CB_TRANSMIT | CB_CID_DEFAULT;
+ } else {
+ txcommand |= CB_TRANSMIT;
+ }
+
+ for (ele_no = 0, next_phys = bdp->tcb_phys, pcurr_tcb = head;
+ ele_no < qlen; ele_no++, pcurr_tcb++) {
+
+ /* set the phys addr for this TCB, next_phys has not incr. yet */
+ pcurr_tcb->tcb_phys = next_phys;
+ next_phys += sizeof (tcb_t);
+
+ /* set the link to next tcb */
+ if (ele_no == (qlen - 1))
+ pcurr_tcb->tcb_hdr.cb_lnk_ptr =
+ cpu_to_le32(bdp->tcb_phys);
+ else
+ pcurr_tcb->tcb_hdr.cb_lnk_ptr = cpu_to_le32(next_phys);
+
+ pcurr_tcb->tcb_hdr.cb_status = 0;
+ pcurr_tcb->tcb_hdr.cb_cmd = cpu_to_le16(txcommand);
+ pcurr_tcb->tcb_cnt = 0;
+ pcurr_tcb->tcb_thrshld = bdp->tx_thld;
+ /* first two TCBs start out marked complete — presumably so the
+ * tx-cleanup logic has a sane initial state; confirm */
+ if (ele_no < 2) {
+ pcurr_tcb->tcb_hdr.cb_status =
+ cpu_to_le16(CB_STATUS_COMPLETE);
+ }
+ pcurr_tcb->tcb_tbd_num = 1;
+
+ if (bdp->flags & IS_BACHELOR) {
+ pcurr_tcb->tcb_tbd_ptr =
+ __constant_cpu_to_le32(0xFFFFFFFF);
+ } else {
+ pcurr_tcb->tcb_tbd_ptr =
+ cpu_to_le32(pcurr_tcb->tcb_phys + 0x10);
+ }
+
+ if (bdp->flags & IS_BACHELOR) {
+ pcurr_tcb->tcb_tbd_expand_ptr =
+ cpu_to_le32(pcurr_tcb->tcb_phys + 0x20);
+ } else {
+ pcurr_tcb->tcb_tbd_expand_ptr =
+ cpu_to_le32(pcurr_tcb->tcb_phys + 0x10);
+ }
+ pcurr_tcb->tcb_tbd_dflt_ptr = pcurr_tcb->tcb_tbd_ptr;
+
+ if (bdp->flags & USE_IPCB) {
+ pcurr_tcb->tbd_ptr = &(pcurr_tcb->tcbu.tbd_array[1]);
+ pcurr_tcb->tcbu.ipcb.ip_activation_high =
+ IPCB_IP_ACTIVATION_DEFAULT;
+ pcurr_tcb->tcbu.ipcb.vlan = 0;
+ } else {
+ pcurr_tcb->tbd_ptr = &(pcurr_tcb->tcbu.tbd_array[0]);
+ }
+
+ pcurr_tcb->tcb_skb = NULL;
+ }
+
+ /* order the descriptor writes before the device can observe them */
+ wmb();
+}
+
+/***************************************************************************/
+/***************************************************************************/
+/* Memory Management Routines */
+/***************************************************************************/
+
+/**
+ * e100_alloc_space - allocate private driver data
+ * @bdp: adapter's private data struct
+ *
+ * This routine allocates memory for the driver. Memory allocated is for the
+ * selftest and statistics structures.
+ *
+ * Returns:
+ * 0: if the operation was successful
+ * %-ENOMEM: if memory allocation failed
+ * (NOTE(review): the unsigned char return truncates -ENOMEM to a
+ * nonzero byte; callers only test for nonzero, so failure is still
+ * signalled correctly)
+ */
+unsigned char __devinit
+e100_alloc_space(struct e100_private *bdp)
+{
+ unsigned long off;
+
+ /* allocate all the dma-able structures in one call:
+ * selftest results, adapter stats, and non-tx cb commands */
+ if (!(bdp->dma_able =
+ pci_alloc_consistent(bdp->pdev, sizeof (bd_dma_able_t),
+ &(bdp->dma_able_phys)))) {
+ goto err;
+ }
+
+ /* now assign the various pointers into the struct we've just allocated */
+ off = offsetof(bd_dma_able_t, selftest);
+
+ bdp->selftest = (self_test_t *) (bdp->dma_able + off);
+ bdp->selftest_phys = bdp->dma_able_phys + off;
+
+ off = offsetof(bd_dma_able_t, stats_counters);
+
+ bdp->stats_counters = (max_counters_t *) (bdp->dma_able + off);
+ bdp->stat_cnt_phys = bdp->dma_able_phys + off;
+
+ return 0;
+
+err:
+ printk(KERN_ERR
+ "e100: Failed to allocate memory\n");
+ return -ENOMEM;
+}
+
+/**
+ * e100_alloc_tcb_pool - allocate TCB circular list
+ * @bdp: adapter's private data struct
+ *
+ * This routine allocates memory for the circular list of transmit descriptors.
+ *
+ * Returns:
+ * 0: if allocation has failed.
+ * 1: Otherwise.
+ */
+int
+e100_alloc_tcb_pool(struct e100_private *bdp)
+{
+ int stcb = sizeof (tcb_t) * bdp->params.TxDescriptors;
+
+ /* allocate space for the TCBs */
+ if (!(bdp->tcb_pool.data =
+ pci_alloc_consistent(bdp->pdev, stcb, &bdp->tcb_phys)))
+ return 0;
+
+ memset(bdp->tcb_pool.data, 0x00, stcb);
+
+ return 1;
+}
+
+/*
+ * e100_free_tcb_pool - release the DMA-coherent TCB ring
+ * @bdp: adapter's private data struct
+ */
+void
+e100_free_tcb_pool(struct e100_private *bdp)
+{
+ pci_free_consistent(bdp->pdev,
+ sizeof (tcb_t) * bdp->params.TxDescriptors,
+ bdp->tcb_pool.data, bdp->tcb_phys);
+ bdp->tcb_phys = 0;
+}
+
+/*
+ * e100_dealloc_space - undo e100_alloc_space()
+ * @bdp: adapter's private data struct
+ *
+ * Frees the combined selftest/statistics DMA block and clears the
+ * cached pointers so a double call is harmless.
+ */
+static void
+e100_dealloc_space(struct e100_private *bdp)
+{
+ if (bdp->dma_able) {
+ pci_free_consistent(bdp->pdev, sizeof (bd_dma_able_t),
+ bdp->dma_able, bdp->dma_able_phys);
+ }
+
+ bdp->selftest_phys = 0;
+ bdp->stat_cnt_phys = 0;
+ bdp->dma_able_phys = 0;
+ bdp->dma_able = 0;
+}
+
+/*
+ * e100_free_rfd_pool - release all receive buffers and list elements
+ * @bdp: adapter's private data struct
+ *
+ * Drains both the active RX list (unmapping and freeing each skb)
+ * and the spare rx_list_elem pool.
+ */
+static void
+e100_free_rfd_pool(struct e100_private *bdp)
+{
+ struct rx_list_elem *rx_struct;
+
+ while (!list_empty(&(bdp->active_rx_list))) {
+
+ rx_struct = list_entry(bdp->active_rx_list.next,
+ struct rx_list_elem, list_elem);
+ list_del(&(rx_struct->list_elem));
+ /* NOTE(review): PCI_DMA_TODEVICE on an RX buffer looks wrong;
+ * the direction should match the one used at map time —
+ * confirm against the mapping in e100_alloc_skbs() */
+ pci_unmap_single(bdp->pdev, rx_struct->dma_addr,
+ sizeof (rfd_t), PCI_DMA_TODEVICE);
+ dev_kfree_skb(rx_struct->skb);
+ kfree(rx_struct);
+ }
+
+ while (!list_empty(&(bdp->rx_struct_pool))) {
+ rx_struct = list_entry(bdp->rx_struct_pool.next,
+ struct rx_list_elem, list_elem);
+ list_del(&(rx_struct->list_elem));
+ kfree(rx_struct);
+ }
+}
+
+/**
+ * e100_alloc_rfd_pool - allocate RFDs
+ * @bdp: adapter's private data struct
+ *
+ * Allocates initial pool of skb which holds both rfd and data,
+ * and return a pointer to the head of the list
+ *
+ * Returns nonzero if at least one receive buffer could be set up,
+ * 0 otherwise. A partially filled pool is acceptable; the watchdog
+ * and ISR replenish it later via e100_alloc_skbs().
+ */
+static int
+e100_alloc_rfd_pool(struct e100_private *bdp)
+{
+ struct rx_list_elem *rx_struct;
+ int i;
+
+ INIT_LIST_HEAD(&(bdp->active_rx_list));
+ INIT_LIST_HEAD(&(bdp->rx_struct_pool));
+ bdp->skb_req = bdp->params.RxDescriptors;
+ for (i = 0; i < bdp->skb_req; i++) {
+ rx_struct = kmalloc(sizeof (struct rx_list_elem), GFP_ATOMIC);
+ /* GFP_ATOMIC can fail; stop early rather than oops on a
+ * NULL list_add — a partial pool is still usable */
+ if (!rx_struct)
+ break;
+ list_add(&(rx_struct->list_elem), &(bdp->rx_struct_pool));
+ }
+ e100_alloc_skbs(bdp);
+ return !list_empty(&(bdp->active_rx_list));
+
+}
+
+/*
+ * e100_clear_pools - free both the RX (RFD) and TX (TCB) pools
+ * @bdp: adapter's private data struct
+ */
+void
+e100_clear_pools(struct e100_private *bdp)
+{
+ bdp->last_tcb = NULL;
+ e100_free_rfd_pool(bdp);
+ e100_free_tcb_pool(bdp);
+}
+
+/*****************************************************************************/
+/*****************************************************************************/
+/* Run Time Functions */
+/*****************************************************************************/
+
+/**
+ * e100_watchdog
+ * @dev: adapter's net_device struct
+ *
+ * This routine runs every 2 seconds and updates our statistics and link state,
+ * and refreshes the txthld value.
+ */
+void
+e100_watchdog(struct net_device *dev)
+{
+ struct e100_private *bdp = dev->priv;
+
+#ifdef E100_CU_DEBUG
+ if (e100_cu_unknown_state(bdp)) {
+ printk(KERN_ERR "e100: %s: CU unknown state in e100_watchdog\n",
+ dev->name);
+ }
+#endif
+ if (!netif_running(dev)) {
+ return;
+ }
+
+ /* check if link state has changed */
+ if (e100_phy_check(bdp)) {
+ if (netif_carrier_ok(dev)) {
+ printk(KERN_ERR
+ "e100: %s NIC Link is Up %d Mbps %s duplex\n",
+ bdp->device->name, bdp->cur_line_speed,
+ (bdp->cur_dplx_mode == HALF_DUPLEX) ?
+ "Half" : "Full");
+
+ e100_config_fc(bdp);
+ e100_config(bdp);
+
+ } else {
+ printk(KERN_ERR "e100: %s NIC Link is Down\n",
+ bdp->device->name);
+ }
+ }
+
+ // toggle the tx queue according to link status
+ // this also resolves a race condition between tx & non-cu cmd flows
+ if (netif_carrier_ok(dev)) {
+ if (netif_running(dev))
+ netif_wake_queue(dev);
+ } else {
+ if (netif_running(dev))
+ netif_stop_queue(dev);
+ /* When changing to non-autoneg, device may lose */
+ /* link with some switches. e100 will try to */
+ /* recover link by sending command to PHY layer */
+ if (bdp->params.e100_speed_duplex != E100_AUTONEG)
+ e100_force_speed_duplex_to_phy(bdp);
+ }
+
+ rmb();
+
+ if (e100_update_stats(bdp)) {
+
+ /* Check if a change in the IFS parameter is needed,
+ and configure the device accordingly */
+ if (bdp->params.b_params & PRM_IFS)
+ e100_manage_adaptive_ifs(bdp);
+
+ /* Now adjust our dynamic tx threshold value */
+ e100_refresh_txthld(bdp);
+
+ /* Now if we are on a 557 and we haven't received any frames then we
+ * should issue a multicast command to reset the RU */
+ if (bdp->rev_id < D101A4_REV_ID) {
+ if (!(bdp->stats_counters->basic_stats.rcv_gd_frames)) {
+ e100_set_multi(dev);
+ }
+ }
+
+ /* Update the statistics needed by the upper interface */
+ /* This should be the last statistic related command
+ * as it's async. now */
+ e100_dump_stats_cntrs(bdp);
+ }
+
+ wmb();
+
+ /* relaunch watchdog timer in 2 sec */
+ mod_timer(&(bdp->watchdog_timer), jiffies + (2 * HZ));
+
+ /* RX pool ran dry: poke the ISR (via SWI) to replenish skbs */
+ if (list_empty(&bdp->active_rx_list))
+ e100_trigger_SWI(bdp);
+}
+
+/**
+ * e100_manage_adaptive_ifs
+ * @bdp: adapter's private data struct
+ *
+ * This routine manages the adaptive Inter-Frame Spacing algorithm
+ * using a state machine.
+ */
+void
+e100_manage_adaptive_ifs(struct e100_private *bdp)
+{
+ static u16 state_table[9][4] = { // rows are states
+ {2, 0, 0, 0}, // state0 // column0: next state if increasing
+ {2, 0, 5, 30}, // state1 // column1: next state if decreasing
+ {5, 1, 5, 30}, // state2 // column2: IFS value for 100 mbit
+ {5, 3, 0, 0}, // state3 // column3: IFS value for 10 mbit
+ {5, 3, 10, 60}, // state4
+ {8, 4, 10, 60}, // state5
+ {8, 6, 0, 0}, // state6
+ {8, 6, 20, 60}, // state7
+ {8, 7, 20, 60} // state8
+ };
+
+ u32 transmits =
+ le32_to_cpu(bdp->stats_counters->basic_stats.xmt_gd_frames);
+ u32 collisions =
+ le32_to_cpu(bdp->stats_counters->basic_stats.xmt_ttl_coll);
+ u32 state = bdp->ifs_state;
+ u32 old_value = bdp->ifs_value;
+ int next_col;
+ u32 min_transmits;
+
+ /* IFS stretching only matters on shared (half-duplex) media */
+ if (bdp->cur_dplx_mode == FULL_DUPLEX) {
+ bdp->ifs_state = 0;
+ bdp->ifs_value = 0;
+
+ } else { /* Half Duplex */
+ /* Set speed specific parameters */
+ if (bdp->cur_line_speed == 100) {
+ next_col = 2;
+ min_transmits = MIN_NUMBER_OF_TRANSMITS_100;
+
+ } else { /* 10 Mbps */
+ next_col = 3;
+ min_transmits = MIN_NUMBER_OF_TRANSMITS_10;
+ }
+
+ /* more than ~3% collisions under real load: back off harder */
+ if ((transmits / 32 < collisions)
+ && (transmits > min_transmits)) {
+ state = state_table[state][0]; /* increment */
+
+ } else if (transmits < min_transmits) {
+ state = state_table[state][1]; /* decrement */
+ }
+
+ bdp->ifs_value = state_table[state][next_col];
+ bdp->ifs_state = state;
+ }
+
+ /* If the IFS value has changed, configure the device */
+ if (bdp->ifs_value != old_value) {
+ e100_config_ifs(bdp);
+ e100_config(bdp);
+ }
+}
+
+/**
+ * e100intr - interrupt handler
+ * @irq: the IRQ number
+ * @dev_inst: the net_device struct
+ * @regs: registers (unused)
+ *
+ * This routine is the ISR for the e100 board. It services
+ * the RX & TX queues & starts the RU if it has stopped due
+ * to no resources.
+ */
+void
+e100intr(int irq, void *dev_inst, struct pt_regs *regs)
+{
+ struct net_device *dev;
+ struct e100_private *bdp;
+ u16 intr_status;
+
+ dev = dev_inst;
+ bdp = dev->priv;
+
+ intr_status = readw(&bdp->scb->scb_status);
+ /* If not my interrupt, just return (IRQ is shared, SA_SHIRQ);
+ * 0xffff means the device is gone/unresponsive */
+ if (!(intr_status & SCB_STATUS_ACK_MASK) || (intr_status == 0xffff)) {
+ return;
+ }
+
+ /* disable and ack intr */
+ e100_disable_clear_intr(bdp);
+
+ /* the device is closed, don't continue or else bad things may happen. */
+ if (!netif_running(dev)) {
+ e100_set_intr_mask(bdp);
+ return;
+ }
+
+ /* SWI intr (triggered by watchdog) is signal to allocate new skb buffers */
+ if (intr_status & SCB_STATUS_ACK_SWI) {
+ e100_alloc_skbs(bdp);
+ }
+
+ /* do recv work if any */
+ if (intr_status &
+ (SCB_STATUS_ACK_FR | SCB_STATUS_ACK_RNR | SCB_STATUS_ACK_SWI))
+ bdp->drv_stats.rx_intr_pkts += e100_rx_srv(bdp);
+
+ /* clean up after tx'ed packets */
+ if (intr_status & (SCB_STATUS_ACK_CNA | SCB_STATUS_ACK_CX))
+ e100_tx_srv(bdp);
+
+ /* re-enable interrupts before returning */
+ e100_set_intr_mask(bdp);
+}
+
+/**
+ * e100_tx_skb_free - free TX skbs resources
+ * @bdp: atapter's private data struct
+ * @tcb: associated tcb of the freed skb
+ *
+ * This routine frees resources of TX skbs.
+ */
+static inline void
+e100_tx_skb_free(struct e100_private *bdp, tcb_t *tcb)
+{
+ if (tcb->tcb_skb) {
+ int i;
+ tbd_t *tbd_arr = tcb->tbd_ptr;
+ int frags = skb_shinfo(tcb->tcb_skb)->nr_frags;
+
+ /* unmap frags+1 TBDs: the linear part plus one per fragment
+  * (see e100_prepare_xmit_buff, which maps them the same way) */
+ for (i = 0; i <= frags; i++, tbd_arr++) {
+ pci_unmap_single(bdp->pdev,
+ le32_to_cpu(tbd_arr->tbd_buf_addr),
+ le16_to_cpu(tbd_arr->tbd_buf_cnt),
+ PCI_DMA_TODEVICE);
+ }
+ /* _irq variant: this path runs from the ISR (see e100_tx_srv) */
+ dev_kfree_skb_irq(tcb->tcb_skb);
+ tcb->tcb_skb = NULL;
+ }
+}
+
+/**
+ * e100_tx_srv - service TX queues
+ * @bdp: atapter's private data struct
+ *
+ * This routine services the TX queues. It reclaims the TCB's & TBD's & other
+ * resources used during the transmit of this buffer. It is called from the ISR.
+ * We don't need a tx_lock since we always access buffers which were already
+ * prepared.
+ */
+void
+e100_tx_srv(struct e100_private *bdp)
+{
+ tcb_t *tcb;
+ int i;
+
+ /* go over at most TxDescriptors buffers */
+ for (i = 0; i < bdp->params.TxDescriptors; i++) {
+ tcb = bdp->tcb_pool.data;
+ tcb += bdp->tcb_pool.head;
+
+ /* order the status read after the device's DMA write */
+ rmb();
+
+ /* if the buffer at 'head' is not complete, break */
+ if (!(tcb->tcb_hdr.cb_status &
+ __constant_cpu_to_le16(CB_STATUS_COMPLETE)))
+ break;
+
+ /* service next buffer, clear the out of resource condition */
+ e100_tx_skb_free(bdp, tcb);
+
+ if (netif_running(bdp->device))
+ netif_wake_queue(bdp->device);
+
+ /* if we've caught up with 'tail', break -- head is only
+  * advanced while it stays behind the producer's tail */
+ if (NEXT_TCB_TOUSE(bdp->tcb_pool.head) == bdp->tcb_pool.tail) {
+ break;
+ }
+
+ bdp->tcb_pool.head = NEXT_TCB_TOUSE(bdp->tcb_pool.head);
+ }
+}
+
+/**
+ * e100_rx_srv - service RX queue
+ * @bdp: adapter's private data struct
+ *
+ * This routine processes the RX interrupt & services the RX queues.
+ * For each successful RFD, it allocates a new msg block, links that
+ * into the RFD list, and sends the old msg upstream.
+ * The new RFD is then put at the end of the free list of RFD's.
+ * It returns the number of serviced RFDs.
+ */
+u32
+e100_rx_srv(struct e100_private *bdp)
+{
+ rfd_t *rfd; /* new rfd, received rfd */
+ int i;
+ u16 rfd_status;
+ struct sk_buff *skb;
+ struct net_device *dev;
+ unsigned int data_sz;
+ struct rx_list_elem *rx_struct;
+ u32 rfd_cnt = 0;
+
+ dev = bdp->device;
+
+ /* current design of rx is as following:
+ * 1. socket buffer (skb) used to pass network packet to upper layer
+ * 2. all HW host memory structures (like RFDs, RBDs and data buffers)
+ * are placed in a skb's data room
+ * 3. when rx process is complete, we change skb internal pointers to exclude
+ * from data area all unrelated things (RFD, RDB) and to leave
+ * just rx'ed packet netto
+ * 4. for each skb passed to upper layer, new one is allocated instead.
+ * 5. if no skb left, in 2 sec another atempt to allocate skbs will be made
+ * (watchdog trigger SWI intr and isr should allocate new skbs)
+ */
+ for (i = 0; i < bdp->params.RxDescriptors; i++) {
+ if (list_empty(&(bdp->active_rx_list))) {
+ break;
+ }
+
+ rx_struct = list_entry(bdp->active_rx_list.next,
+ struct rx_list_elem, list_elem);
+ skb = rx_struct->skb;
+
+ rfd = RFD_POINTER(skb, bdp); /* locate RFD within skb */
+
+ // sync only the RFD header
+ pci_dma_sync_single(bdp->pdev, rx_struct->dma_addr,
+ bdp->rfd_size, PCI_DMA_FROMDEVICE);
+ rfd_status = le16_to_cpu(rfd->rfd_header.cb_status); /* get RFD's status */
+ if (!(rfd_status & RFD_STATUS_COMPLETE)) /* does not contains data yet - exit */
+ break;
+
+ /* to allow manipulation with current skb we need to unlink it */
+ list_del(&(rx_struct->list_elem));
+
+ /* do not free & unmap badly received packet.
+ * move it to the end of skb list for reuse */
+ if (!(rfd_status & RFD_STATUS_OK)) {
+ e100_add_skb_to_end(bdp, rx_struct);
+ continue;
+ }
+
+ data_sz = min_t(u16, (le16_to_cpu(rfd->rfd_act_cnt) & 0x3fff),
+ (sizeof (rfd_t) - bdp->rfd_size));
+
+ /* now sync all the data */
+ pci_dma_sync_single(bdp->pdev, rx_struct->dma_addr,
+ (data_sz + bdp->rfd_size),
+ PCI_DMA_FROMDEVICE);
+
+ pci_unmap_single(bdp->pdev, rx_struct->dma_addr,
+ sizeof (rfd_t), PCI_DMA_FROMDEVICE);
+
+ list_add(&(rx_struct->list_elem), &(bdp->rx_struct_pool));
+
+ /* end of dma access to rfd */
+ bdp->skb_req++; /* incr number of requested skbs */
+ e100_alloc_skbs(bdp); /* and get them */
+
+ /* set packet size, excluding checksum (2 last bytes) if it is present */
+ if ((bdp->flags & DF_CSUM_OFFLOAD)
+ && (bdp->rev_id < D102_REV_ID))
+ skb_put(skb, (int) data_sz - 2);
+ else
+ skb_put(skb, (int) data_sz);
+
+ /* set the protocol */
+ skb->protocol = eth_type_trans(skb, dev);
+
+ /* set the checksum info */
+ if (bdp->flags & DF_CSUM_OFFLOAD) {
+ if (bdp->rev_id >= D102_REV_ID) {
+ skb->ip_summed = e100_D102_check_checksum(rfd);
+ } else {
+ skb->ip_summed = e100_D101M_checksum(bdp, skb);
+ }
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+
+ /* account the payload length BEFORE handing the skb to the
+  * stack: netif_rx()/vlan_hwaccel_rx() pass ownership upstream
+  * and the skb may be freed by the time they return, so reading
+  * skb->len afterwards would be a use-after-free */
+ bdp->drv_stats.net_stats.rx_bytes += skb->len;
+
+ if(bdp->vlgrp && (rfd_status & CB_STATUS_VLAN)) {
+ vlan_hwaccel_rx(skb, bdp->vlgrp, be16_to_cpu(rfd->vlanid));
+ } else {
+ netif_rx(skb);
+ }
+ dev->last_rx = jiffies;
+
+ rfd_cnt++;
+ } /* end of rfd loop */
+
+ /* restart the RU if it has stopped */
+ if ((readw(&bdp->scb->scb_status) & SCB_RUS_MASK) != SCB_RUS_READY) {
+ e100_start_ru(bdp);
+ }
+
+ return rfd_cnt;
+}
+
+/* Dynamically adjust the transmit threshold (tx_thld) based on the ratio
+ * of good transmits to TX FIFO underruns from the last statistics dump. */
+void
+e100_refresh_txthld(struct e100_private *bdp)
+{
+ basic_cntr_t *pstat = &(bdp->stats_counters->basic_stats);
+
+ /* As long as tx_per_underrun is not 0, we can go about dynamically
+  * adjusting the xmit threshold. We stop doing that & resort to defaults
+  * once the adjustments become meaningless. The value is adjusted by
+  * dumping the error counters & checking the # of xmit underrun errors
+  * we've had. */
+ if (bdp->tx_per_underrun) {
+ /* We are going to last values dumped from the dump statistics
+ * command */
+ if (le32_to_cpu(pstat->xmt_gd_frames)) {
+ if (le32_to_cpu(pstat->xmt_uruns)) {
+ /*
+ * if we have had more than one underrun per "DEFAULT #
+ * OF XMITS ALLOWED PER UNDERRUN" good xmits, raise the
+ * THRESHOLD.
+ */
+ if ((le32_to_cpu(pstat->xmt_gd_frames) /
+ le32_to_cpu(pstat->xmt_uruns)) <
+ bdp->tx_per_underrun) {
+ bdp->tx_thld += 3;
+ }
+ }
+
+ /*
+ * if we've had less than one underrun per the DEFAULT number
+ * of good xmits allowed, lower the THOLD but not less than 6
+ */
+ if (le32_to_cpu(pstat->xmt_gd_frames) >
+ bdp->tx_per_underrun) {
+ bdp->tx_thld--;
+
+ if (bdp->tx_thld < 6)
+ bdp->tx_thld = 6;
+
+ }
+ }
+
+ /* end good xmits */
+ /*
+ * If our adjustments are becoming unreasonable, stop adjusting &
+ * resort to defaults & pray. A THOLD value > 190 means that the
+ * adapter will wait for 190*8=1520 bytes in TX FIFO before it
+ * starts xmit. Since MTU is 1514, it doesn't make any sense for
+ * further increase. */
+ if (bdp->tx_thld >= 190) {
+ bdp->tx_per_underrun = 0;
+ bdp->tx_thld = 189;
+ }
+ } /* end underrun check */
+}
+
+/**
+ * e100_prepare_xmit_buff - prepare a buffer for transmission
+ * @bdp: adapter's private data struct
+ * @skb: skb to send
+ *
+ * This routine prepares a buffer for transmission. It checks
+ * the message length for the appropriate size. It picks up a
+ * free tcb from the TCB pool and sets up the corresponding
+ * TBD's. If the number of fragments are more than the number
+ * of TBD/TCB it copies all the fragments in a coalesce buffer.
+ * It returns a pointer to the prepared TCB.
+ */
+static inline tcb_t *
+e100_prepare_xmit_buff(struct e100_private *bdp, struct sk_buff *skb)
+{
+ tcb_t *tcb, *prev_tcb;
+
+ tcb = bdp->tcb_pool.data;
+ tcb += TCB_TO_USE(bdp->tcb_pool);
+
+ /* reset the IPCB offload fields before (re)using this TCB */
+ if (bdp->flags & USE_IPCB) {
+ tcb->tcbu.ipcb.ip_activation_high = IPCB_IP_ACTIVATION_DEFAULT;
+ tcb->tcbu.ipcb.ip_schedule &= ~IPCB_TCP_PACKET;
+ tcb->tcbu.ipcb.ip_schedule &= ~IPCB_TCPUDP_CHECKSUM_ENABLE;
+ }
+
+ /* ask hardware to insert the VLAN tag carried in the skb, if any */
+ if(bdp->vlgrp && vlan_tx_tag_present(skb)) {
+ (tcb->tcbu).ipcb.ip_activation_high |= IPCB_INSERTVLAN_ENABLE;
+ (tcb->tcbu).ipcb.vlan = cpu_to_be16(vlan_tx_tag_get(skb));
+ }
+
+ tcb->tcb_hdr.cb_status = 0;
+ tcb->tcb_thrshld = bdp->tx_thld;
+ /* S-bit: CU suspends after this CB; cleared on the previous CB below */
+ tcb->tcb_hdr.cb_cmd |= __constant_cpu_to_le16(CB_S_BIT);
+
+ /* Set I (Interrupt) bit on every (TX_FRAME_CNT)th packet */
+ if (!(++bdp->tx_count % TX_FRAME_CNT))
+ tcb->tcb_hdr.cb_cmd |= __constant_cpu_to_le16(CB_I_BIT);
+ else
+ /* Clear I bit on other packets */
+ tcb->tcb_hdr.cb_cmd &= ~__constant_cpu_to_le16(CB_I_BIT);
+
+ tcb->tcb_skb = skb;
+
+/* TX checksum offload is disabled in this port */
+#if 0
+ if (skb->ip_summed == CHECKSUM_HW) {
+ const struct iphdr *ip = skb->nh.iph;
+
+ if ((ip->protocol == IPPROTO_TCP) ||
+ (ip->protocol == IPPROTO_UDP)) {
+
+ tcb->tcbu.ipcb.ip_activation_high |=
+ IPCB_HARDWAREPARSING_ENABLE;
+ tcb->tcbu.ipcb.ip_schedule |=
+ IPCB_TCPUDP_CHECKSUM_ENABLE;
+
+ if (ip->protocol == IPPROTO_TCP)
+ tcb->tcbu.ipcb.ip_schedule |= IPCB_TCP_PACKET;
+ }
+ }
+#endif
+
+ /* map one TBD for a linear skb, else one TBD for the linear part
+  * plus one per page fragment (unmapped again in e100_tx_skb_free) */
+ if (!skb_shinfo(skb)->nr_frags) {
+ (tcb->tbd_ptr)->tbd_buf_addr =
+ cpu_to_le32(pci_map_single(bdp->pdev, skb->data,
+ skb->len, PCI_DMA_TODEVICE));
+ (tcb->tbd_ptr)->tbd_buf_cnt = cpu_to_le16(skb->len);
+ tcb->tcb_tbd_num = 1;
+ tcb->tcb_tbd_ptr = tcb->tcb_tbd_dflt_ptr;
+ } else {
+ int i;
+ void *addr;
+ tbd_t *tbd_arr_ptr = &(tcb->tbd_ptr[1]);
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+
+ (tcb->tbd_ptr)->tbd_buf_addr =
+ cpu_to_le32(pci_map_single(bdp->pdev, skb->data,
+ (skb->len - skb->data_len),
+ PCI_DMA_TODEVICE));
+ (tcb->tbd_ptr)->tbd_buf_cnt =
+ cpu_to_le16(skb->len - skb->data_len);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags;
+ i++, tbd_arr_ptr++, frag++) {
+
+ addr = ((void *) page_address(frag->page) +
+ frag->page_offset);
+
+ tbd_arr_ptr->tbd_buf_addr =
+ cpu_to_le32(pci_map_single(bdp->pdev,
+ addr, frag->size,
+ PCI_DMA_TODEVICE));
+ tbd_arr_ptr->tbd_buf_cnt = cpu_to_le16(frag->size);
+ }
+ tcb->tcb_tbd_num = skb_shinfo(skb)->nr_frags + 1;
+ tcb->tcb_tbd_ptr = tcb->tcb_tbd_expand_ptr;
+ }
+
+ /* clear the S-BIT on the previous tcb so the CU can chain into this one */
+ prev_tcb = bdp->tcb_pool.data;
+ prev_tcb += PREV_TCB_USED(bdp->tcb_pool);
+ prev_tcb->tcb_hdr.cb_cmd &= __constant_cpu_to_le16((u16) ~CB_S_BIT);
+
+ bdp->tcb_pool.tail = NEXT_TCB_TOUSE(bdp->tcb_pool.tail);
+
+ /* make all TCB/TBD stores visible before kicking the CU */
+ wmb();
+
+ e100_start_cu(bdp, tcb);
+
+ return tcb;
+}
+
+/* Changed for 82558 enhancement */
+/**
+ * e100_start_cu - start the adapter's CU
+ * @bdp: atapter's private data struct
+ * @tcb: TCB to be transmitted
+ *
+ * This routine issues a CU Start or CU Resume command to the 82558/9.
+ * This routine was added because the prepare_ext_xmit_buff takes advantage
+ * of the 82558/9's Dynamic TBD chaining feature and has to start the CU as
+ * soon as the first TBD is ready.
+ *
+ * e100_start_cu must be called while holding the tx_lock !
+ */
+u8
+e100_start_cu(struct e100_private *bdp, tcb_t *tcb)
+{
+ unsigned long lock_flag;
+ u8 ret = true;
+
+ spin_lock_irqsave(&(bdp->bd_lock), lock_flag);
+ switch (bdp->next_cu_cmd) {
+ case RESUME_NO_WAIT:
+ /* last cu command was a CU_RESUME; if this is a 558 or newer we
+ * don't need to wait for command word to clear; we reach here
+ * only if we are bachelor (IS_BACHELOR, set in RESUME_WAIT) */
+ e100_exec_cmd(bdp, SCB_CUC_RESUME);
+ break;
+
+ case RESUME_WAIT:
+ /* ICH at 10Mb half duplex needs a NOOP + delay before the
+ * resume -- presumably a hardware workaround; do not remove */
+ if ((bdp->flags & IS_ICH) &&
+ (bdp->cur_line_speed == 10) &&
+ (bdp->cur_dplx_mode == HALF_DUPLEX)) {
+ e100_wait_exec_simple(bdp, SCB_CUC_NOOP);
+ udelay(1);
+ }
+ if ((e100_wait_exec_simple(bdp, SCB_CUC_RESUME)) &&
+ (bdp->flags & IS_BACHELOR) && (!(bdp->flags & IS_ICH))) {
+ bdp->next_cu_cmd = RESUME_NO_WAIT;
+ }
+ break;
+
+ case START_WAIT:
+ // The last command was a non_tx CU command
+ if (!e100_wait_cus_idle(bdp))
+ printk(KERN_DEBUG
+ "e100: %s: cu_start: timeout waiting for cu\n",
+ bdp->device->name);
+ if (!e100_wait_exec_cmplx(bdp, (u32) (tcb->tcb_phys),
+ SCB_CUC_START, CB_TRANSMIT)) {
+ printk(KERN_DEBUG
+ "e100: %s: cu_start: timeout waiting for scb\n",
+ bdp->device->name);
+ /* issue the start anyway, but report failure */
+ e100_exec_cmplx(bdp, (u32) (tcb->tcb_phys),
+ SCB_CUC_START);
+ ret = false;
+ }
+
+ bdp->next_cu_cmd = RESUME_WAIT;
+
+ break;
+ }
+
+ /* save the last tcb */
+ bdp->last_tcb = tcb;
+
+ spin_unlock_irqrestore(&(bdp->bd_lock), lock_flag);
+ return ret;
+}
+
+/* ====================================================================== */
+/* hw */
+/* ====================================================================== */
+
+/**
+ * e100_selftest - perform H/W self test
+ * @bdp: atapter's private data struct
+ * @st_timeout: address to return timeout value, if fails
+ * @st_result: address to return selftest result, if fails
+ *
+ * This routine will issue PORT Self-test command to test the e100.
+ * The self-test will fail if the adapter's master-enable bit is not
+ * set in the PCI Command Register, or if the adapter is not seated
+ * in a PCI master-enabled slot. we also disable interrupts when the
+ * command is completed.
+ *
+ * Returns:
+ * true: if adapter passes self_test
+ * false: otherwise
+ */
+unsigned char
+e100_selftest(struct e100_private *bdp, u32 *st_timeout, u32 *st_result)
+{
+ u32 selftest_cmd;
+
+ /* initialize the nic state before running test */
+ e100_sw_reset(bdp, PORT_SOFTWARE_RESET);
+ /* Setup the address of the self_test area */
+ selftest_cmd = bdp->selftest_phys;
+
+ /* Setup SELF TEST Command Code in D3 - D0 */
+ selftest_cmd |= PORT_SELFTEST;
+
+ /* Initialize the self-test signature and results DWORDS */
+ bdp->selftest->st_sign = 0;
+ bdp->selftest->st_result = 0xffffffff;
+
+ /* Do the port command */
+ writel(selftest_cmd, &bdp->scb->scb_port);
+ readw(&(bdp->scb->scb_status)); /* flushes last write, read-safe */
+
+ /* Wait at least 10 milliseconds for the self-test to complete */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ / 100 + 1);
+
+ /* disable interrupts since they are enabled */
+ /* after device reset during selftest */
+ e100_disable_clear_intr(bdp);
+
+ /* If the first self-test DWORD is still zero, we've timed out. If the
+ * second DWORD is not zero then we have an error. (Comparisons with 0
+ * need no byte-swapping.) */
+ if ((bdp->selftest->st_sign == 0) || (bdp->selftest->st_result != 0)) {
+
+ if (st_timeout)
+ *st_timeout = !(le32_to_cpu(bdp->selftest->st_sign));
+
+ if (st_result)
+ *st_result = le32_to_cpu(bdp->selftest->st_result);
+
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * e100_setup_iaaddr - issue IA setup command
+ * @bdp: adapter's private data struct
+ * @eaddr: new ethernet address
+ *
+ * This routine will issue the IA setup command. This command
+ * will notify the 82557 (e100) of what its individual (node)
+ * address is. This command will be executed in polled mode.
+ *
+ * Returns:
+ *      true: if the IA setup command was successfully issued and completed
+ *      false: otherwise
+ */
+unsigned char
+e100_setup_iaaddr(struct e100_private *bdp, u8 *eaddr)
+{
+ unsigned int i;
+ cb_header_t *ntcb_hdr;
+ unsigned char res;
+ nxmit_cb_entry_t *cmd;
+
+ /* grab a non-TX command block; fail the call if none available */
+ if ((cmd = e100_alloc_non_tx_cmd(bdp)) == NULL) {
+ res = false;
+ goto exit;
+ }
+
+ ntcb_hdr = (cb_header_t *) cmd->non_tx_cmd;
+ ntcb_hdr->cb_cmd = __constant_cpu_to_le16(CB_IA_ADDRESS);
+
+ /* copy the 6-byte MAC address into the IA-setup command block */
+ for (i = 0; i < ETH_ALEN; i++) {
+ (cmd->non_tx_cmd)->ntcb.setup.ia_addr[i] = eaddr[i];
+ }
+
+ /* e100_exec_non_cu_cmd frees 'cmd' on both success and failure */
+ res = e100_exec_non_cu_cmd(bdp, cmd);
+ if (!res)
+ printk(KERN_WARNING "e100: %s: IA setup failed\n",
+ bdp->device->name);
+
+exit:
+ return res;
+}
+
+/**
+ * e100_start_ru - start the RU if needed
+ * @bdp: atapter's private data struct
+ *
+ * This routine checks the status of the 82557's receive unit(RU),
+ * and starts the RU if it was not already active. However,
+ * before restarting the RU, the driver gives the RU the buffers
+ * it freed up during the servicing of the ISR. If there are
+ * no free buffers to give to the RU, (i.e. we have reached a
+ * no resource condition) the RU will not be started till the
+ * next ISR.
+ */
+void
+e100_start_ru(struct e100_private *bdp)
+{
+ struct rx_list_elem *rx_struct = NULL;
+ int buffer_found = 0;
+ struct list_head *entry_ptr;
+
+ /* find the first RFD on the active list that the hardware has not
+  * already completed -- that is where the RU should be restarted */
+ list_for_each(entry_ptr, &(bdp->active_rx_list)) {
+ rx_struct =
+ list_entry(entry_ptr, struct rx_list_elem, list_elem);
+ pci_dma_sync_single(bdp->pdev, rx_struct->dma_addr,
+ bdp->rfd_size, PCI_DMA_FROMDEVICE);
+ if (!((SKB_RFD_STATUS(rx_struct->skb, bdp) &
+ __constant_cpu_to_le16(RFD_STATUS_COMPLETE)))) {
+ buffer_found = 1;
+ break;
+ }
+ }
+
+ /* No available buffers; the RU stays stopped until the next ISR */
+ if (!buffer_found) {
+ return;
+ }
+
+ spin_lock(&bdp->bd_lock);
+
+ if (!e100_wait_exec_cmplx(bdp, rx_struct->dma_addr, SCB_RUC_START, 0)) {
+ printk(KERN_DEBUG
+ "e100: %s: start_ru: wait_scb failed\n",
+ bdp->device->name);
+ e100_exec_cmplx(bdp, rx_struct->dma_addr, SCB_RUC_START);
+ }
+ /* a non-resume command was issued, so the next CU resume must wait */
+ if (bdp->next_cu_cmd == RESUME_NO_WAIT) {
+ bdp->next_cu_cmd = RESUME_WAIT;
+ }
+ spin_unlock(&bdp->bd_lock);
+}
+
+/**
+ * e100_cmd_complete_location
+ * @bdp: atapter's private data struct
+ *
+ * This routine returns a pointer to the location of the command-complete
+ * DWord in the dump statistical counters area, according to the statistical
+ * counters mode (557 - basic, 558 - extended, or 559 - TCO mode).
+ * See e100_config_init() for the setting of the statistical counters mode.
+ */
+static u32 *
+e100_cmd_complete_location(struct e100_private *bdp)
+{
+ u32 *cmd_complete;
+ max_counters_t *stats = bdp->stats_counters;
+
+ /* the offset of cmd_complete depends on which counters layout the
+  * device was configured for (see e100_config_init) */
+ switch (bdp->stat_mode) {
+ case E100_EXTENDED_STATS:
+ cmd_complete =
+ (u32 *) &(((err_cntr_558_t *) (stats))->cmd_complete);
+ break;
+
+ case E100_TCO_STATS:
+ cmd_complete =
+ (u32 *) &(((err_cntr_559_t *) (stats))->cmd_complete);
+ break;
+
+ case E100_BASIC_STATS:
+ default:
+ cmd_complete =
+ (u32 *) &(((err_cntr_557_t *) (stats))->cmd_complete);
+ break;
+ }
+
+ return cmd_complete;
+}
+
+/**
+ * e100_clr_cntrs - clear statistics counters
+ * @bdp: atapter's private data struct
+ *
+ * This routine will clear the adapter error statistic counters.
+ *
+ * Returns:
+ * true: if successfully cleared stat counters
+ * false: otherwise
+ */
+static unsigned char __devinit
+e100_clr_cntrs(struct e100_private *bdp)
+{
+ volatile u32 *pcmd_complete;
+
+ /* clear the dump counter complete word */
+ pcmd_complete = e100_cmd_complete_location(bdp);
+ *pcmd_complete = 0;
+ /* make the clear visible before the device may DMA-write it */
+ wmb();
+
+ /* tell the device where the statistics dump area lives */
+ if (!e100_wait_exec_cmplx(bdp, bdp->stat_cnt_phys, SCB_CUC_DUMP_ADDR, 0))
+ return false;
+
+ /* wait 10 microseconds for the command to complete */
+ udelay(10);
+
+ /* dump and reset the counters */
+ if (!e100_wait_exec_simple(bdp, SCB_CUC_DUMP_RST_STAT))
+ return false;
+
+ /* a non-resume command was issued, so the next CU resume must wait */
+ if (bdp->next_cu_cmd == RESUME_NO_WAIT) {
+ bdp->next_cu_cmd = RESUME_WAIT;
+ }
+
+ return true;
+}
+
+/* Fold the device's last dumped statistics into the driver's counters.
+ * Returns false (and changes nothing) if the last dump has not completed. */
+static unsigned char
+e100_update_stats(struct e100_private *bdp)
+{
+ u32 *pcmd_complete;
+ basic_cntr_t *pstat = &(bdp->stats_counters->basic_stats);
+
+ // check if last dump command completed
+ // NOTE(review): le32_to_cpu on a host constant looks reversed
+ // (cpu_to_le32 was presumably meant); the two are the same byte swap,
+ // so the comparison is correct either way
+ pcmd_complete = e100_cmd_complete_location(bdp);
+ if (*pcmd_complete != le32_to_cpu(DUMP_RST_STAT_COMPLETED) &&
+ *pcmd_complete != le32_to_cpu(DUMP_STAT_COMPLETED)) {
+ return false;
+ }
+
+ /* increment the statistics */
+ bdp->drv_stats.net_stats.rx_packets +=
+ le32_to_cpu(pstat->rcv_gd_frames);
+ bdp->drv_stats.net_stats.tx_packets +=
+ le32_to_cpu(pstat->xmt_gd_frames);
+ bdp->drv_stats.net_stats.rx_dropped += le32_to_cpu(pstat->rcv_rsrc_err);
+ bdp->drv_stats.net_stats.collisions += le32_to_cpu(pstat->xmt_ttl_coll);
+ bdp->drv_stats.net_stats.rx_length_errors +=
+ le32_to_cpu(pstat->rcv_shrt_frames);
+ bdp->drv_stats.net_stats.rx_over_errors +=
+ le32_to_cpu(pstat->rcv_rsrc_err);
+ bdp->drv_stats.net_stats.rx_crc_errors +=
+ le32_to_cpu(pstat->rcv_crc_errs);
+ bdp->drv_stats.net_stats.rx_frame_errors +=
+ le32_to_cpu(pstat->rcv_algn_errs);
+ bdp->drv_stats.net_stats.rx_fifo_errors +=
+ le32_to_cpu(pstat->rcv_oruns);
+ bdp->drv_stats.net_stats.tx_aborted_errors +=
+ le32_to_cpu(pstat->xmt_max_coll);
+ bdp->drv_stats.net_stats.tx_carrier_errors +=
+ le32_to_cpu(pstat->xmt_lost_crs);
+ bdp->drv_stats.net_stats.tx_fifo_errors +=
+ le32_to_cpu(pstat->xmt_uruns);
+
+ bdp->drv_stats.tx_late_col += le32_to_cpu(pstat->xmt_late_coll);
+ bdp->drv_stats.tx_ok_defrd += le32_to_cpu(pstat->xmt_deferred);
+ bdp->drv_stats.tx_one_retry += le32_to_cpu(pstat->xmt_sngl_coll);
+ bdp->drv_stats.tx_mt_one_retry += le32_to_cpu(pstat->xmt_mlt_coll);
+ bdp->drv_stats.rcv_cdt_frames += le32_to_cpu(pstat->rcv_err_coll);
+
+ /* 558-style extended counters (flow control) */
+ if (bdp->stat_mode != E100_BASIC_STATS) {
+ ext_cntr_t *pex_stat = &bdp->stats_counters->extended_stats;
+
+ bdp->drv_stats.xmt_fc_pkts +=
+ le32_to_cpu(pex_stat->xmt_fc_frames);
+ bdp->drv_stats.rcv_fc_pkts +=
+ le32_to_cpu(pex_stat->rcv_fc_frames);
+ bdp->drv_stats.rcv_fc_unsupported +=
+ le32_to_cpu(pex_stat->rcv_fc_unsupported);
+ }
+
+ /* 559-style TCO counters */
+ if (bdp->stat_mode == E100_TCO_STATS) {
+ tco_cntr_t *ptco_stat = &bdp->stats_counters->tco_stats;
+
+ bdp->drv_stats.xmt_tco_pkts +=
+ le16_to_cpu(ptco_stat->xmt_tco_frames);
+ bdp->drv_stats.rcv_tco_pkts +=
+ le16_to_cpu(ptco_stat->rcv_tco_frames);
+ }
+
+ /* re-arm the completion word for the next dump */
+ *pcmd_complete = 0;
+ return true;
+}
+
+/**
+ * e100_dump_stats_cntrs
+ * @bdp: adapter's private data struct
+ *
+ * This routine will dump the board statistical counters without waiting
+ * for stat_dump to complete. Any access to these stats should verify the
+ * completion of the command.
+ */
+void
+e100_dump_stats_cntrs(struct e100_private *bdp)
+{
+ unsigned long lock_flag_bd;
+
+ spin_lock_irqsave(&(bdp->bd_lock), lock_flag_bd);
+
+ /* dump h/w stats counters; completion is checked later by
+  * e100_update_stats via the cmd_complete word */
+ if (e100_wait_exec_simple(bdp, SCB_CUC_DUMP_RST_STAT)) {
+ /* a non-resume command was issued; next CU resume must wait */
+ if (bdp->next_cu_cmd == RESUME_NO_WAIT) {
+ bdp->next_cu_cmd = RESUME_WAIT;
+ }
+ }
+
+ spin_unlock_irqrestore(&(bdp->bd_lock), lock_flag_bd);
+}
+
+/**
+ * e100_exec_non_cu_cmd
+ * @bdp: atapter's private data struct
+ * @command: the non-cu command to execute
+ *
+ * This routine will submit a command block to be executed,
+ */
+unsigned char
+e100_exec_non_cu_cmd(struct e100_private *bdp, nxmit_cb_entry_t *command)
+{
+ cb_header_t *ntcb_hdr;
+ unsigned long lock_flag;
+ unsigned long expiration_time;
+ unsigned char rc = true;
+ u8 sub_cmd;
+
+ ntcb_hdr = (cb_header_t *) command->non_tx_cmd; /* get hdr of non tcb cmd */
+ /* NOTE(review): le16_to_cpu is presumably meant here (reading a
+  * little-endian field); cpu_to_le16 is the same swap, so the low
+  * byte kept in the u8 is identical either way */
+ sub_cmd = cpu_to_le16(ntcb_hdr->cb_cmd);
+
+ /* Set the Command Block to be the last command block */
+ ntcb_hdr->cb_cmd |= __constant_cpu_to_le16(CB_EL_BIT);
+ ntcb_hdr->cb_status = 0;
+ ntcb_hdr->cb_lnk_ptr = 0;
+
+ wmb();
+ /* cannot sleep here (yield below), so defer from interrupt context */
+ if (in_interrupt())
+ return e100_delayed_exec_non_cu_cmd(bdp, command);
+
+ /* while traffic may be flowing, don't race with TX -- defer */
+ if (netif_running(bdp->device) && netif_carrier_ok(bdp->device))
+ return e100_delayed_exec_non_cu_cmd(bdp, command);
+
+ spin_lock_bh(&(bdp->bd_non_tx_lock));
+
+ if (bdp->non_tx_command_state != E100_NON_TX_IDLE) {
+ goto delayed_exec;
+ }
+
+ /* defer if the last TX command has not completed yet */
+ if (bdp->last_tcb) {
+ rmb();
+ if ((bdp->last_tcb->tcb_hdr.cb_status &
+ __constant_cpu_to_le16(CB_STATUS_COMPLETE)) == 0)
+ goto delayed_exec;
+ }
+
+ /* defer while the CU is busy */
+ if ((readw(&bdp->scb->scb_status) & SCB_CUS_MASK) == SCB_CUS_ACTIVE) {
+ goto delayed_exec;
+ }
+
+ spin_lock_irqsave(&bdp->bd_lock, lock_flag);
+
+ if (!e100_wait_exec_cmplx(bdp, command->dma_addr, SCB_CUC_START, sub_cmd)) {
+ spin_unlock_irqrestore(&(bdp->bd_lock), lock_flag);
+ rc = false;
+ goto exit;
+ }
+
+ bdp->next_cu_cmd = START_WAIT;
+ spin_unlock_irqrestore(&(bdp->bd_lock), lock_flag);
+
+ /* now wait for completion of non-cu CB up to 20 msec */
+ expiration_time = jiffies + HZ / 50 + 1;
+ rmb();
+ while (!(ntcb_hdr->cb_status &
+ __constant_cpu_to_le16(CB_STATUS_COMPLETE))) {
+
+ if (time_before(jiffies, expiration_time)) {
+ /* drop the lock while yielding the CPU */
+ spin_unlock_bh(&(bdp->bd_non_tx_lock));
+ yield();
+ spin_lock_bh(&(bdp->bd_non_tx_lock));
+ } else {
+#ifdef E100_CU_DEBUG
+ printk(KERN_ERR "e100: %s: non-TX command (%x) "
+ "timeout\n", bdp->device->name, sub_cmd);
+#endif
+ rc = false;
+ goto exit;
+ }
+ rmb();
+ }
+
+exit:
+ /* the command block is consumed on both success and failure */
+ e100_free_non_tx_cmd(bdp, command);
+
+ if (netif_running(bdp->device))
+ netif_wake_queue(bdp->device);
+
+ spin_unlock_bh(&(bdp->bd_non_tx_lock));
+ return rc;
+
+delayed_exec:
+ spin_unlock_bh(&(bdp->bd_non_tx_lock));
+ return e100_delayed_exec_non_cu_cmd(bdp, command);
+}
+
+/**
+ * e100_sw_reset
+ * @bdp: adapter's private data struct
+ * @reset_cmd: s/w reset or selective reset
+ *
+ * This routine will issue a software reset to the adapter. It
+ * will also disable interrupts, as they are enabled after reset.
+ */
+void
+e100_sw_reset(struct e100_private *bdp, u32 reset_cmd)
+{
+ /* Do a selective reset first to avoid a potential PCI hang */
+ writel(PORT_SELECTIVE_RESET, &bdp->scb->scb_port);
+ readw(&(bdp->scb->scb_status)); /* flushes last write, read-safe */
+
+ /* wait for the reset to take effect */
+ udelay(20);
+ /* follow with the full software reset if that is what was asked for */
+ if (reset_cmd == PORT_SOFTWARE_RESET) {
+ writel(PORT_SOFTWARE_RESET, &bdp->scb->scb_port);
+
+ /* wait 20 micro seconds for the reset to take effect */
+ udelay(20);
+ }
+
+ /* Mask off our interrupt line -- it is unmasked after reset */
+ e100_disable_clear_intr(bdp);
+#ifdef E100_CU_DEBUG
+ /* forget the debug command history across a reset */
+ bdp->last_cmd = 0;
+ bdp->last_sub_cmd = 0;
+#endif
+}
+
+/**
+ * e100_load_microcode - download microcode to controller
+ * @bdp: adapter's private data struct
+ *
+ * This routine downloads microcode on to the controller. This
+ * microcode is available for the 82558/9, 82550. Currently the
+ * microcode handles interrupt bundling and TCO workaround.
+ *
+ * Returns:
+ *      true: if successful
+ *      false: otherwise
+ */
+static unsigned char
+e100_load_microcode(struct e100_private *bdp)
+{
+ /* one ucode image + patch-point offsets per supported silicon rev;
+  * list is terminated by a zero rev_id entry */
+ static struct {
+ u8 rev_id;
+ u32 ucode[UCODE_MAX_DWORDS + 1];
+ int timer_dword;
+ int bundle_dword;
+ int min_size_dword;
+ } ucode_opts[] = {
+ { D101A4_REV_ID,
+ D101_A_RCVBUNDLE_UCODE,
+ D101_CPUSAVER_TIMER_DWORD,
+ D101_CPUSAVER_BUNDLE_DWORD,
+ D101_CPUSAVER_MIN_SIZE_DWORD },
+ { D101B0_REV_ID,
+ D101_B0_RCVBUNDLE_UCODE,
+ D101_CPUSAVER_TIMER_DWORD,
+ D101_CPUSAVER_BUNDLE_DWORD,
+ D101_CPUSAVER_MIN_SIZE_DWORD },
+ { D101MA_REV_ID,
+ D101M_B_RCVBUNDLE_UCODE,
+ D101M_CPUSAVER_TIMER_DWORD,
+ D101M_CPUSAVER_BUNDLE_DWORD,
+ D101M_CPUSAVER_MIN_SIZE_DWORD },
+ { D101S_REV_ID,
+ D101S_RCVBUNDLE_UCODE,
+ D101S_CPUSAVER_TIMER_DWORD,
+ D101S_CPUSAVER_BUNDLE_DWORD,
+ D101S_CPUSAVER_MIN_SIZE_DWORD },
+ { D102_REV_ID,
+ D102_B_RCVBUNDLE_UCODE,
+ D102_B_CPUSAVER_TIMER_DWORD,
+ D102_B_CPUSAVER_BUNDLE_DWORD,
+ D102_B_CPUSAVER_MIN_SIZE_DWORD },
+ { D102C_REV_ID,
+ D102_C_RCVBUNDLE_UCODE,
+ D102_C_CPUSAVER_TIMER_DWORD,
+ D102_C_CPUSAVER_BUNDLE_DWORD,
+ D102_C_CPUSAVER_MIN_SIZE_DWORD },
+ { D102E_REV_ID,
+ D102_E_RCVBUNDLE_UCODE,
+ D102_E_CPUSAVER_TIMER_DWORD,
+ D102_E_CPUSAVER_BUNDLE_DWORD,
+ D102_E_CPUSAVER_MIN_SIZE_DWORD },
+ { 0, {0}, 0, 0, 0}
+ }, *opts;
+
+ opts = ucode_opts;
+
+ /* User turned ucode loading off */
+ if (!(bdp->params.b_params & PRM_UCODE))
+ return false;
+
+ /* These controllers do not need ucode */
+ if (bdp->flags & IS_ICH)
+ return false;
+
+ /* Search for ucode match against h/w rev_id */
+ while (opts->rev_id) {
+ if (bdp->rev_id == opts->rev_id) {
+ int i;
+ u32 *ucode_dword;
+ load_ucode_cb_t *ucode_cmd_ptr;
+ nxmit_cb_entry_t *cmd = e100_alloc_non_tx_cmd(bdp);
+
+ if (cmd != NULL) {
+ ucode_cmd_ptr =
+ (load_ucode_cb_t *) cmd->non_tx_cmd;
+ ucode_dword = ucode_cmd_ptr->ucode_dword;
+ } else {
+ return false;
+ }
+
+ memcpy(ucode_dword, opts->ucode, sizeof (opts->ucode));
+
+ /* Insert user-tunable settings into the low 16 bits
+  * of the image's designated patch dwords */
+ ucode_dword[opts->timer_dword] &= 0xFFFF0000;
+ ucode_dword[opts->timer_dword] |=
+ (u16) bdp->params.IntDelay;
+ ucode_dword[opts->bundle_dword] &= 0xFFFF0000;
+ ucode_dword[opts->bundle_dword] |=
+ (u16) bdp->params.BundleMax;
+ ucode_dword[opts->min_size_dword] &= 0xFFFF0000;
+ ucode_dword[opts->min_size_dword] |=
+ (bdp->params.b_params & PRM_BUNDLE_SMALL) ?
+ 0xFFFF : 0xFF80;
+
+ /* the device expects the image little-endian */
+ for (i = 0; i < UCODE_MAX_DWORDS; i++)
+ cpu_to_le32s(&(ucode_dword[i]));
+
+ ucode_cmd_ptr->load_ucode_cbhdr.cb_cmd =
+ __constant_cpu_to_le16(CB_LOAD_MICROCODE);
+
+ /* e100_exec_non_cu_cmd frees 'cmd' in all cases */
+ return e100_exec_non_cu_cmd(bdp, cmd);
+ }
+ opts++;
+ }
+
+ /* no image for this silicon revision */
+ return false;
+}
+
+/***************************************************************************/
+/***************************************************************************/
+/* EEPROM Functions */
+/***************************************************************************/
+
+/* Read PWA (printed wired assembly) number */
+void __devinit
+e100_rd_pwa_no(struct e100_private *bdp)
+{
+ /* the 32-bit PWA number spans two consecutive 16-bit EEPROM words,
+  * high word first */
+ bdp->pwa_no = e100_eeprom_read(bdp, EEPROM_PWA_NO);
+ bdp->pwa_no <<= 16;
+ bdp->pwa_no |= e100_eeprom_read(bdp, EEPROM_PWA_NO + 1);
+}
+
+/* Read the permanent ethernet address from the eprom. */
+void __devinit
+e100_rd_eaddr(struct e100_private *bdp)
+{
+ int i;
+ u16 eeprom_word;
+
+ /* the 6-byte MAC address is stored as three 16-bit EEPROM words,
+  * low byte first within each word */
+ for (i = 0; i < 6; i += 2) {
+ eeprom_word =
+ e100_eeprom_read(bdp,
+ EEPROM_NODE_ADDRESS_BYTE_0 + (i / 2));
+
+ /* keep both the live dev_addr and the permanent copy */
+ bdp->device->dev_addr[i] =
+ bdp->perm_node_address[i] = (u8) eeprom_word;
+ bdp->device->dev_addr[i + 1] =
+ bdp->perm_node_address[i + 1] = (u8) (eeprom_word >> 8);
+ }
+}
+
+/* Check the D102 RFD flags to see if the checksum passed */
+/* Check the D102 RFD flags to see if the checksum passed. Returns the
+ * value to store in skb->ip_summed: CHECKSUM_UNNECESSARY only when the
+ * frame was parsed as TCP or UDP and the hardware checksum was both
+ * computed and valid; CHECKSUM_NONE otherwise. */
+static unsigned char
+e100_D102_check_checksum(rfd_t *rfd)
+{
+ if (((le16_to_cpu(rfd->rfd_header.cb_status)) & RFD_PARSE_BIT)
+ && (((rfd->rcvparserstatus & CHECKSUM_PROTOCOL_MASK) ==
+ RFD_TCP_PACKET)
+ || ((rfd->rcvparserstatus & CHECKSUM_PROTOCOL_MASK) ==
+ RFD_UDP_PACKET))
+ && (rfd->checksumstatus & TCPUDP_CHECKSUM_BIT_VALID)
+ && (rfd->checksumstatus & TCPUDP_CHECKSUM_VALID)) {
+ return CHECKSUM_UNNECESSARY;
+ }
+ return CHECKSUM_NONE;
+}
+
+/**
+ * e100_D101M_checksum
+ * @bdp: atapter's private data struct
+ * @skb: skb received
+ *
+ * Sets the skb->csum value from D101 csum found at the end of the Rx frame. The
+ * D101M sums all words in frame excluding the ethernet II header (14 bytes) so
+ * in case the packet is ethernet II and the protocol is IP, all is need is to
+ * assign this value to skb->csum.
+ */
+static unsigned char
+e100_D101M_checksum(struct e100_private *bdp, struct sk_buff *skb)
+{
+ unsigned short proto = (skb->protocol);
+
+ /* only IP frames carry the trailing hardware checksum we can use */
+ if (proto == __constant_htons(ETH_P_IP)) {
+
+ /* the D101M appends the 16-bit sum right after the packet
+  * data, i.e. at skb->tail; may be unaligned, hence the helper */
+ skb->csum = get_unaligned((u16 *) (skb->tail));
+ return CHECKSUM_HW;
+ }
+ return CHECKSUM_NONE;
+}
+
+/***************************************************************************/
+/***************************************************************************/
+/***************************************************************************/
+/***************************************************************************/
+/* Auxilary Functions */
+/***************************************************************************/
+
+/* Print the board's configuration */
+void __devinit
+e100_print_brd_conf(struct e100_private *bdp)
+{
+ /* Print the string if checksum Offloading was enabled */
+ if (bdp->flags & DF_CSUM_OFFLOAD)
+ printk(KERN_NOTICE " Hardware receive checksums enabled\n");
+ else {
+ /* only mention the disabled feature on silicon that has it */
+ if (bdp->rev_id >= D101MA_REV_ID)
+ printk(KERN_NOTICE " Hardware receive checksums disabled\n");
+ }
+
+ if ((bdp->flags & DF_UCODE_LOADED))
+ printk(KERN_NOTICE " cpu cycle saver enabled\n");
+}
+
+/**
+ * e100_pci_setup - setup the adapter's PCI information
+ * @pcid: adapter's pci_dev struct
+ * @bdp: adapter's private data struct
+ *
+ * This routine sets up all PCI information for the adapter. It enables the bus
+ * master bit (some BIOS don't do this), requests memory and I/O regions, and
+ * calls ioremap() on the adapter's memory region.
+ *
+ * Returns:
+ *      0: if successful
+ *      negative errno: otherwise
+ */
+static unsigned char __devinit
+e100_pci_setup(struct pci_dev *pcid, struct e100_private *bdp)
+{
+ struct net_device *dev = bdp->device;
+ int rc = 0;
+
+ if ((rc = pci_enable_device(pcid)) != 0) {
+ goto err;
+ }
+
+ /* dev and ven ID have already been checked so it is our device */
+ pci_read_config_byte(pcid, PCI_REVISION_ID, (u8 *) &(bdp->rev_id));
+
+ /* address #0 is a memory region */
+ dev->mem_start = pci_resource_start(pcid, 0);
+ dev->mem_end = dev->mem_start + sizeof (scb_t);
+
+ /* address #1 is a IO region */
+ dev->base_addr = pci_resource_start(pcid, 1);
+
+ if ((rc = pci_request_regions(pcid, e100_short_driver_name)) != 0) {
+ goto err_disable;
+ }
+
+ pci_enable_wake(pcid, 0, 0);
+
+ /* if Bus Mastering is off, turn it on! */
+ pci_set_master(pcid);
+
+ /* address #0 is a memory mapping */
+ bdp->scb = (scb_t *) ioremap_nocache(dev->mem_start, sizeof (scb_t));
+
+ if (!bdp->scb) {
+ printk(KERN_ERR "e100: %s: Failed to map PCI address 0x%lX\n",
+ dev->name, pci_resource_start(pcid, 0));
+ rc = -ENOMEM;
+ goto err_region;
+ }
+
+ return 0;
+
+err_region:
+ pci_release_regions(pcid);
+err_disable:
+ pci_disable_device(pcid);
+err:
+ return rc;
+}
+
+/*
+ * e100_isolate_driver - quiesce the driver before a reset/suspend.
+ * Masks interrupts, stops the watchdog timer and the Tx queue when the
+ * interface is up, then issues a selective reset to the adapter.
+ */
+void
+e100_isolate_driver(struct e100_private *bdp)
+{
+
+	/* Check if interface is up */
+	/* NOTE: Can't use netif_running(bdp->device) because */
+	/* dev_close clears __LINK_STATE_START before calling */
+	/* e100_close (aka dev->stop) */
+	if (bdp->device->flags & IFF_UP) {
+		e100_disable_clear_intr(bdp);
+		del_timer_sync(&bdp->watchdog_timer);
+		netif_carrier_off(bdp->device);
+		netif_stop_queue(bdp->device);
+		/* forget the last queued TCB; it is invalid after the reset */
+		bdp->last_tcb = NULL;
+	}
+	e100_sw_reset(bdp, PORT_SELECTIVE_RESET);
+}
+
+/*
+ * e100_set_speed_duplex - force the PHY to bdp->params.e100_speed_duplex
+ * and re-apply the flow-control/device configuration afterwards.
+ */
+void
+e100_set_speed_duplex(struct e100_private *bdp)
+{
+	int carrier_ok;
+	/* Device may lose link with some switches when */
+	/* changing speed/duplex to non-autoneg. e100   */
+	/* needs to remember carrier state in order to  */
+	/* start watchdog timer for recovering link     */
+	if ((carrier_ok = netif_carrier_ok(bdp->device)))
+		e100_isolate_driver(bdp);
+	e100_phy_set_speed_duplex(bdp, true);
+	e100_config_fc(bdp);	/* re-config flow-control if necessary */
+	e100_config(bdp);
+	if (carrier_ok)
+		e100_deisolate_driver(bdp, false);
+}
+
+/* Mark every TCB in the pool complete so that the Tx-cleanup path will
+ * reclaim them (used when Tx completions may have been lost). */
+static void
+e100_tcb_add_C_bit(struct e100_private *bdp)
+{
+	tcb_t *tcb_array = (tcb_t *) bdp->tcb_pool.data;
+	int idx;
+
+	for (idx = 0; idx < bdp->params.TxDescriptors; idx++)
+		tcb_array[idx].tcb_hdr.cb_status |=
+			cpu_to_le16(CB_STATUS_COMPLETE);
+}
+
+/*
+ * Procedure: e100_configure_device
+ *
+ * Description: Runs the full device configuration command sequence:
+ *              CU/RU base load, optional microcode load, dump-counters
+ *              address, individual address, multicast list, optional
+ *              flow-control CSR setup and the final config command.
+ *              The command order matters to the hardware.
+ *
+ * Arguments:
+ *      bdp - Ptr to this card's e100_bdconfig structure
+ *
+ * Returns:
+ *      true upon success
+ *      false upon failure
+ */
+unsigned char
+e100_configure_device(struct e100_private *bdp)
+{
+	/*load CU & RU base */
+	if (!e100_wait_exec_cmplx(bdp, 0, SCB_CUC_LOAD_BASE, 0))
+		return false;
+
+	if (e100_load_microcode(bdp))
+		bdp->flags |= DF_UCODE_LOADED;
+
+	if (!e100_wait_exec_cmplx(bdp, 0, SCB_RUC_LOAD_BASE, 0))
+		return false;
+
+	/* Issue the load dump counters address command */
+	if (!e100_wait_exec_cmplx(bdp, bdp->stat_cnt_phys, SCB_CUC_DUMP_ADDR, 0))
+		return false;
+
+	if (!e100_setup_iaaddr(bdp, bdp->device->dev_addr)) {
+		printk(KERN_ERR "e100: e100_configure_device: "
+		       "setup iaaddr failed\n");
+		return false;
+	}
+
+	e100_set_multi_exec(bdp->device);
+
+	/* Change for 82558 enhancement */
+	/* If 82558/9 and if the user has enabled flow control, set up */
+	/* flow Control Reg. in the CSR */
+	if ((bdp->flags & IS_BACHELOR)
+	    && (bdp->params.b_params & PRM_FC)) {
+		writeb(DFLT_FC_THLD,
+		       &bdp->scb->scb_ext.d101_scb.scb_fc_thld);
+		writeb(DFLT_FC_CMD,
+		       &bdp->scb->scb_ext.d101_scb.scb_fc_xon_xoff);
+	}
+
+	e100_force_config(bdp);
+
+	return true;
+}
+
+/*
+ * e100_deisolate_driver - undo e100_isolate_driver.
+ * @full_reset: true -> software reset + full reconfiguration (needed after
+ *              D3/power loss), false -> selective reset only.
+ * Restarts the RU, watchdog, Tx queue and interrupt mask when the
+ * interface is running.
+ */
+void
+e100_deisolate_driver(struct e100_private *bdp, u8 full_reset)
+{
+	u32 cmd = full_reset ? PORT_SOFTWARE_RESET : PORT_SELECTIVE_RESET;
+	e100_sw_reset(bdp, cmd);
+	if (cmd == PORT_SOFTWARE_RESET) {
+		if (!e100_configure_device(bdp))
+			printk(KERN_ERR "e100: e100_deisolate_driver:"
+			       " device configuration failed\n");
+	}
+
+	if (netif_running(bdp->device)) {
+
+		bdp->next_cu_cmd = START_WAIT;
+		bdp->last_tcb = NULL;
+
+		e100_start_ru(bdp);
+
+		/* relaunch watchdog timer in 2 sec */
+		mod_timer(&(bdp->watchdog_timer), jiffies + (2 * HZ));
+
+		// we must clear tcbs since we may have lost Tx intrrupt
+		// or have unsent frames on the tcb chain
+		e100_tcb_add_C_bit(bdp);
+		e100_tx_srv(bdp);
+		netif_wake_queue(bdp->device);
+		e100_set_intr_mask(bdp);
+	}
+}
+
+#if 0
+/*
+ * e100_do_ethtool_ioctl - dispatch an SIOCETHTOOL request to the matching
+ * handler.  Only the command word (sizeof(ecmd.cmd)) is copied from user
+ * space here; each handler re-copies its own full request structure.
+ * Returns 0 or a negative errno; unknown commands get -EOPNOTSUPP.
+ */
+static int
+e100_do_ethtool_ioctl(struct net_device *dev, struct ifreq *ifr)
+{
+	struct ethtool_cmd ecmd;
+	int rc = -EOPNOTSUPP;
+
+	if (copy_from_user(&ecmd, ifr->ifr_data, sizeof (ecmd.cmd)))
+		return -EFAULT;
+
+	switch (ecmd.cmd) {
+	case ETHTOOL_GSET:
+		rc = e100_ethtool_get_settings(dev, ifr);
+		break;
+	case ETHTOOL_SSET:
+		rc = e100_ethtool_set_settings(dev, ifr);
+		break;
+	case ETHTOOL_GDRVINFO:
+		rc = e100_ethtool_get_drvinfo(dev, ifr);
+		break;
+	case ETHTOOL_GREGS:
+		rc = e100_ethtool_gregs(dev, ifr);
+		break;
+	case ETHTOOL_NWAY_RST:
+		rc = e100_ethtool_nway_rst(dev, ifr);
+		break;
+	case ETHTOOL_GLINK:
+		rc = e100_ethtool_glink(dev, ifr);
+		break;
+	case ETHTOOL_GEEPROM:
+	case ETHTOOL_SEEPROM:
+		rc = e100_ethtool_eeprom(dev, ifr);
+		break;
+	case ETHTOOL_GSTATS: {
+		/* handled inline: copy the net_stats counters out as u64s */
+		struct {
+			struct ethtool_stats cmd;
+			uint64_t data[E100_STATS_LEN];
+		} stats = { {ETHTOOL_GSTATS, E100_STATS_LEN} };
+		struct e100_private *bdp = dev->priv;
+		void *addr = ifr->ifr_data;
+		int i;
+
+		for(i = 0; i < E100_STATS_LEN; i++)
+			stats.data[i] =
+				((unsigned long *)&bdp->drv_stats.net_stats)[i];
+		if(copy_to_user(addr, &stats, sizeof(stats)))
+			return -EFAULT;
+		return 0;
+	}
+	case ETHTOOL_GWOL:
+	case ETHTOOL_SWOL:
+		rc = e100_ethtool_wol(dev, ifr);
+		break;
+	case ETHTOOL_TEST:
+		rc = e100_ethtool_test(dev, ifr);
+		break;
+	case ETHTOOL_GSTRINGS:
+		rc = e100_ethtool_gstrings(dev,ifr);
+		break;
+	case ETHTOOL_PHYS_ID:
+		rc = e100_ethtool_led_blink(dev,ifr);
+		break;
+	default:
+		break;
+	} //switch
+	return rc;
+}
+
+/*
+ * e100_ethtool_get_settings - ETHTOOL_GSET handler.
+ * Fills an ethtool_cmd with the adapter's capabilities, current link
+ * speed/duplex (only valid while carrier is up) and the advertisement
+ * bits read back from the PHY's MII_ADVERTISE register.
+ */
+static int
+e100_ethtool_get_settings(struct net_device *dev, struct ifreq *ifr)
+{
+	struct e100_private *bdp;
+	struct ethtool_cmd ecmd;
+	u16 advert = 0;
+
+	memset((void *) &ecmd, 0, sizeof (ecmd));
+
+	bdp = dev->priv;
+
+	ecmd.supported = bdp->speed_duplex_caps;
+
+	ecmd.port =
+		(bdp->speed_duplex_caps & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
+	ecmd.transceiver = XCVR_INTERNAL;
+	ecmd.phy_address = bdp->phy_addr;
+
+	if (netif_carrier_ok(bdp->device)) {
+		ecmd.speed = bdp->cur_line_speed;
+		ecmd.duplex =
+			(bdp->cur_dplx_mode == HALF_DUPLEX) ? DUPLEX_HALF : DUPLEX_FULL;
+	}
+	else {
+		/* no link: speed/duplex are unknown */
+		ecmd.speed = -1;
+		ecmd.duplex = -1;
+	}
+
+	ecmd.advertising = ADVERTISED_TP;
+
+	if (bdp->params.e100_speed_duplex == E100_AUTONEG) {
+		ecmd.autoneg = AUTONEG_ENABLE;
+		ecmd.advertising |= ADVERTISED_Autoneg;
+	} else {
+		ecmd.autoneg = AUTONEG_DISABLE;
+	}
+
+	if (bdp->speed_duplex_caps & SUPPORTED_MII) {
+		/* translate the PHY's advertisement register to ethtool bits */
+		e100_mdi_read(bdp, MII_ADVERTISE, bdp->phy_addr, &advert);
+
+		if (advert & ADVERTISE_10HALF)
+			ecmd.advertising |= ADVERTISED_10baseT_Half;
+		if (advert & ADVERTISE_10FULL)
+			ecmd.advertising |= ADVERTISED_10baseT_Full;
+		if (advert & ADVERTISE_100HALF)
+			ecmd.advertising |= ADVERTISED_100baseT_Half;
+		if (advert & ADVERTISE_100FULL)
+			ecmd.advertising |= ADVERTISED_100baseT_Full;
+	} else {
+		ecmd.autoneg = AUTONEG_DISABLE;
+		ecmd.advertising &= ~ADVERTISED_Autoneg;
+	}
+
+	if (copy_to_user(ifr->ifr_data, &ecmd, sizeof (ecmd)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * e100_ethtool_set_settings - ETHTOOL_SSET handler.
+ * Either restarts autonegotiation (when requested and supported) or maps
+ * the requested speed/duplex pair onto one of the driver's forced modes,
+ * rejecting combinations the hardware does not support.
+ */
+static int
+e100_ethtool_set_settings(struct net_device *dev, struct ifreq *ifr)
+{
+	struct e100_private *bdp;
+	struct ethtool_cmd ecmd;
+	int new_mode;
+	int new_cap;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	bdp = dev->priv;
+	if (copy_from_user(&ecmd, ifr->ifr_data, sizeof (ecmd)))
+		return -EFAULT;
+
+	/* autoneg requested and supported: just (re)enable it */
+	if ((ecmd.autoneg == AUTONEG_ENABLE)
+	    && (bdp->speed_duplex_caps & SUPPORTED_Autoneg)) {
+		bdp->params.e100_speed_duplex = E100_AUTONEG;
+		e100_set_speed_duplex(bdp);
+		return 0;
+	}
+
+	/* otherwise translate the speed/duplex pair into the driver's
+	 * forced mode and the matching ethtool capability bit */
+	if (ecmd.speed == SPEED_10) {
+		if (ecmd.duplex == DUPLEX_HALF) {
+			new_mode = E100_SPEED_10_HALF;
+			new_cap = SUPPORTED_10baseT_Half;
+		} else {
+			new_mode = E100_SPEED_10_FULL;
+			new_cap = SUPPORTED_10baseT_Full;
+		}
+	} else {
+		if (ecmd.duplex == DUPLEX_HALF) {
+			new_mode = E100_SPEED_100_HALF;
+			new_cap = SUPPORTED_100baseT_Half;
+		} else {
+			new_mode = E100_SPEED_100_FULL;
+			new_cap = SUPPORTED_100baseT_Full;
+		}
+	}
+
+	if (!(bdp->speed_duplex_caps & new_cap))
+		return -EOPNOTSUPP;
+
+	bdp->params.e100_speed_duplex = new_mode;
+	e100_set_speed_duplex(bdp);
+
+	return 0;
+}
+
+/* ETHTOOL_GLINK handler: report the current link state to user space */
+static int
+e100_ethtool_glink(struct net_device *dev, struct ifreq *ifr)
+{
+	struct e100_private *bdp = dev->priv;
+	struct ethtool_value info = { ETHTOOL_GLINK };
+
+	/* Consider both PHY link and netif_running */
+	info.data = e100_update_link_state(bdp);
+
+	if (copy_to_user(ifr->ifr_data, &info, sizeof (info)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/* ETHTOOL_TEST handler: run the offline/online diagnostics and copy the
+ * result block (header + E100_MAX_TEST_RES u64 results) back to the user. */
+static int
+e100_ethtool_test(struct net_device *dev, struct ifreq *ifr)
+{
+	const size_t info_size =
+		sizeof(struct ethtool_test) + E100_MAX_TEST_RES * sizeof(u64);
+	struct ethtool_test *info;
+	int rc = -EFAULT;
+
+	info = kmalloc(info_size, GFP_ATOMIC);
+	if (!info)
+		return -ENOMEM;
+	memset((void *) info, 0, info_size);
+
+	/* only the request header comes from user space... */
+	if (copy_from_user(info, ifr->ifr_data, sizeof(*info)))
+		goto exit;
+
+	info->flags = e100_run_diag(dev, info->data, info->flags);
+
+	/* ...but the whole result block goes back */
+	if (!copy_to_user(ifr->ifr_data, info, info_size))
+		rc = 0;
+exit:
+	kfree(info);
+	return rc;
+}
+
+/*
+ * e100_ethtool_gregs - ETHTOOL_GREGS handler: dump the SCB command/status
+ * registers to user space.
+ *
+ * Fix: the "&regs" arguments to copy_from_user()/copy_to_user() had been
+ * corrupted into a mis-encoded registered-trademark glyph (an entity
+ * mangling of "&reg"); the address-of expressions are restored.
+ */
+static int
+e100_ethtool_gregs(struct net_device *dev, struct ifreq *ifr)
+{
+	struct e100_private *bdp;
+	u32 regs_buff[E100_REGS_LEN];
+	struct ethtool_regs regs = {ETHTOOL_GREGS};
+	void *addr = ifr->ifr_data;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+	bdp = dev->priv;
+
+	if(copy_from_user(&regs, addr, sizeof(regs)))
+		return -EFAULT;
+
+	/* version: 1 in the top byte, chip revision in the low byte */
+	regs.version = (1 << 24) | bdp->rev_id;
+	regs_buff[0] = readb(&(bdp->scb->scb_cmd_hi)) << 24 |
+		readb(&(bdp->scb->scb_cmd_low)) << 16 |
+		readw(&(bdp->scb->scb_status));
+
+	if(copy_to_user(addr, &regs, sizeof(regs)))
+		return -EFAULT;
+
+	addr += offsetof(struct ethtool_regs, data);
+	if(copy_to_user(addr, regs_buff, regs.len))
+		return -EFAULT;
+
+	return 0;
+}
+
+/* ETHTOOL_NWAY_RST handler: restart autonegotiation, which must be both
+ * supported by the PHY and currently selected by the driver. */
+static int
+e100_ethtool_nway_rst(struct net_device *dev, struct ifreq *ifr)
+{
+	struct e100_private *bdp;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	bdp = dev->priv;
+
+	if (!(bdp->speed_duplex_caps & SUPPORTED_Autoneg) ||
+	    (bdp->params.e100_speed_duplex != E100_AUTONEG))
+		return -EFAULT;
+
+	e100_set_speed_duplex(bdp);
+	return 0;
+}
+
+/* ETHTOOL_GDRVINFO handler: report driver name/version, bus location and
+ * the sizes of the register/EEPROM/test dumps. */
+static int
+e100_ethtool_get_drvinfo(struct net_device *dev, struct ifreq *ifr)
+{
+	struct e100_private *bdp = dev->priv;
+	struct ethtool_drvinfo info;
+
+	/* zero-fill first, so every strncpy below stays NUL-terminated */
+	memset((void *) &info, 0, sizeof (info));
+
+	strncpy(info.driver, e100_short_driver_name, sizeof (info.driver) - 1);
+	strncpy(info.version, e100_driver_version, sizeof (info.version) - 1);
+	strncpy(info.fw_version, "N/A", sizeof (info.fw_version) - 1);
+	strncpy(info.bus_info, bdp->pdev->slot_name,
+		sizeof (info.bus_info) - 1);
+
+	info.n_stats = E100_STATS_LEN;
+	info.regdump_len = E100_REGS_LEN * sizeof(u32);
+	info.eedump_len = (bdp->eeprom_size << 1);
+	info.testinfo_len = E100_MAX_TEST_RES;
+
+	if (copy_to_user(ifr->ifr_data, &info, sizeof (info)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * e100_ethtool_eeprom - ETHTOOL_GEEPROM / ETHTOOL_SEEPROM handler.
+ * Reads or writes a byte range of the EEPROM through the word-oriented
+ * e100_eeprom_read/e100_eeprom_write_block primitives.  Odd byte offsets
+ * and odd lengths are handled by pre-reading the partially-covered first
+ * and last words so a read-modify-write preserves their other byte.
+ */
+static int
+e100_ethtool_eeprom(struct net_device *dev, struct ifreq *ifr)
+{
+	struct e100_private *bdp;
+	struct ethtool_eeprom ecmd;
+	u16 eeprom_data[256];
+	u16 *usr_eeprom_ptr;
+	u16 first_word, last_word;
+	int i, max_len;
+	void *ptr;
+	u8 *eeprom_data_bytes = (u8 *)eeprom_data;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	bdp = dev->priv;
+
+	if (copy_from_user(&ecmd, ifr->ifr_data, sizeof (ecmd)))
+		return -EFAULT;
+
+	/* user's data area starts right after the request header */
+	usr_eeprom_ptr =
+		(u16 *) (ifr->ifr_data + offsetof(struct ethtool_eeprom, data));
+
+	max_len = bdp->eeprom_size * 2;
+
+	/* reject offset+len wrap-around */
+	if (ecmd.offset > ecmd.offset + ecmd.len)
+		return -EINVAL;
+
+	if ((ecmd.offset + ecmd.len) > max_len)
+		ecmd.len = (max_len - ecmd.offset);
+
+	first_word = ecmd.offset >> 1;
+	last_word = (ecmd.offset + ecmd.len - 1) >> 1;
+
+	if (first_word >= bdp->eeprom_size)
+		return -EFAULT;
+
+	if (ecmd.cmd == ETHTOOL_GEEPROM) {
+		for(i = 0; i <= (last_word - first_word); i++)
+			eeprom_data[i] = e100_eeprom_read(bdp, first_word + i);
+
+		ecmd.magic = E100_EEPROM_MAGIC;
+
+		if (copy_to_user(ifr->ifr_data, &ecmd, sizeof (ecmd)))
+			return -EFAULT;
+
+		/* odd start offset: skip the first byte of the first word */
+		if(ecmd.offset & 1)
+			eeprom_data_bytes++;
+		if (copy_to_user(usr_eeprom_ptr, eeprom_data_bytes, ecmd.len))
+			return -EFAULT;
+	} else {
+		if (ecmd.magic != E100_EEPROM_MAGIC)
+			return -EFAULT;
+
+		/* NOTE: void-pointer arithmetic below is a GCC extension
+		 * (treated as byte-sized), as used throughout the kernel */
+		ptr = (void *)eeprom_data;
+		if(ecmd.offset & 1) {
+			/* need modification of first changed EEPROM word */
+			/* only the second byte of the word is being modified */
+			eeprom_data[0] = e100_eeprom_read(bdp, first_word);
+			ptr++;
+		}
+		if((ecmd.offset + ecmd.len) & 1) {
+			/* need modification of last changed EEPROM word */
+			/* only the first byte of the word is being modified */
+			eeprom_data[last_word - first_word] =
+				e100_eeprom_read(bdp, last_word);
+		}
+		if(copy_from_user(ptr, usr_eeprom_ptr, ecmd.len))
+			return -EFAULT;
+
+		e100_eeprom_write_block(bdp, first_word, eeprom_data,
+					last_word - first_word + 1);
+
+		if (copy_to_user(ifr->ifr_data, &ecmd, sizeof (ecmd)))
+			return -EFAULT;
+	}
+	return 0;
+}
+#endif
+
+#define E100_BLINK_INTERVAL (HZ/4)
+/**
+ * e100_led_control
+ * @bdp: adapter's private data struct
+ * @led_mdi_op: led operation
+ *
+ * Software control over adapter's led. The possible operations are:
+ * TURN LED OFF, TURN LED ON and RETURN LED CONTROL TO HARDWARE.
+ * Implemented as a single MDI write to the 82555 LED switch register.
+ */
+static void
+e100_led_control(struct e100_private *bdp, u16 led_mdi_op)
+{
+	e100_mdi_write(bdp, PHY_82555_LED_SWITCH_CONTROL,
+		       bdp->phy_addr, led_mdi_op);
+
+}
+/**
+ * e100_led_blink_callback
+ * @data: pointer to adapter's private data struct
+ *
+ * Blink timer callback. Toggles the LED_IS_ON flag, drives the LED to the
+ * matching state and re-arms itself E100_BLINK_INTERVAL jiffies ahead.
+ */
+static void
+e100_led_blink_callback(unsigned long data)
+{
+	struct e100_private *bdp = (struct e100_private *) data;
+	u16 led_op;
+
+	if (bdp->flags & LED_IS_ON) {
+		bdp->flags &= ~LED_IS_ON;
+		led_op = PHY_82555_LED_OFF;
+	} else {
+		bdp->flags |= LED_IS_ON;
+		/* pre-D101MA revisions use a different "on" opcode */
+		led_op = (bdp->rev_id >= D101MA_REV_ID) ?
+			PHY_82555_LED_ON_559 : PHY_82555_LED_ON_PRE_559;
+	}
+	e100_led_control(bdp, led_op);
+
+	mod_timer(&bdp->blink_timer, jiffies + E100_BLINK_INTERVAL);
+}
+/**
+ * e100_ethtool_led_blink
+ * @dev: pointer to adapter's net_device struct
+ * @ifr: pointer to ioctl request structure
+ *
+ * Blink led ioctl handler. Initiates the blink timer and sleeps until
+ * the blink period expires. Then it kills the timer and returns. The led
+ * control is returned back to hardware when the blink timer is killed.
+ */
+static int
+e100_ethtool_led_blink(struct net_device *dev, struct ifreq *ifr)
+{
+	struct e100_private *bdp;
+	struct ethtool_value ecmd;
+
+	bdp = dev->priv;
+
+	if (copy_from_user(&ecmd, ifr->ifr_data, sizeof (ecmd)))
+		return -EFAULT;
+
+	/* lazily initialize the blink timer on first use */
+	if(!bdp->blink_timer.function) {
+		init_timer(&bdp->blink_timer);
+		bdp->blink_timer.function = e100_led_blink_callback;
+		bdp->blink_timer.data = (unsigned long) bdp;
+	}
+
+	mod_timer(&bdp->blink_timer, jiffies);
+
+	set_current_state(TASK_INTERRUPTIBLE);
+
+	/* ecmd.data is the blink duration in seconds; 0 means "maximum" */
+	if ((!ecmd.data) || (ecmd.data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ)))
+		ecmd.data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
+
+	schedule_timeout(ecmd.data * HZ);
+
+	del_timer_sync(&bdp->blink_timer);
+
+	/* hand LED control back to the hardware */
+	e100_led_control(bdp, PHY_82555_LED_NORMAL_CONTROL);
+
+	return 0;
+}
+
+/* Identify a known 10BaseT-only adapter by its PCI IDs
+ * (device 0x1229, subsystem 8086:0003). */
+static inline int __devinit
+e100_10BaseT_adapter(struct e100_private *bdp)
+{
+	struct pci_dev *pdev = bdp->pdev;
+
+	if (pdev->device != 0x1229)
+		return 0;
+	if (pdev->subsystem_vendor != 0x8086)
+		return 0;
+	return (pdev->subsystem_device == 0x0003);
+}
+
+/* Derive bdp->speed_duplex_caps from the PHY's BMSR capability bits,
+ * with special cases for the NC3133 (fibre) and for a 10BaseT-only
+ * adapter whose PHY reads back as all-ones. */
+static void __devinit
+e100_get_speed_duplex_caps(struct e100_private *bdp)
+{
+	u16 status;
+
+	e100_mdi_read(bdp, MII_BMSR, bdp->phy_addr, &status);
+
+	bdp->speed_duplex_caps = 0;
+
+	if (status & BMSR_ANEGCAPABLE)
+		bdp->speed_duplex_caps |= SUPPORTED_Autoneg;
+	if (status & BMSR_10HALF)
+		bdp->speed_duplex_caps |= SUPPORTED_10baseT_Half;
+	if (status & BMSR_10FULL)
+		bdp->speed_duplex_caps |= SUPPORTED_10baseT_Full;
+	if (status & BMSR_100HALF)
+		bdp->speed_duplex_caps |= SUPPORTED_100baseT_Half;
+	if (status & BMSR_100FULL)
+		bdp->speed_duplex_caps |= SUPPORTED_100baseT_Full;
+
+	if (IS_NC3133(bdp))
+		bdp->speed_duplex_caps =
+			(SUPPORTED_FIBRE | SUPPORTED_100baseT_Full);
+	else
+		bdp->speed_duplex_caps |= SUPPORTED_TP;
+
+	if ((status == 0xFFFF) && e100_10BaseT_adapter(bdp)) {
+		bdp->speed_duplex_caps =
+			(SUPPORTED_10baseT_Half | SUPPORTED_TP);
+	} else {
+		bdp->speed_duplex_caps |= SUPPORTED_MII;
+	}
+
+}
+
+#ifdef CONFIG_PM
+/*
+ * e100_setup_filter - queue a CB_LOAD_FILTER command configuring the
+ * wake-up packet filter according to bdp->wolopts (unicast match and/or
+ * ARP with the low IP-address bytes).  Returns true on success.
+ */
+static unsigned char
+e100_setup_filter(struct e100_private *bdp)
+{
+	cb_header_t *ntcb_hdr;
+	unsigned char res = false;
+	nxmit_cb_entry_t *cmd;
+
+	if ((cmd = e100_alloc_non_tx_cmd(bdp)) == NULL) {
+		goto exit;
+	}
+
+	ntcb_hdr = (cb_header_t *) cmd->non_tx_cmd;
+	ntcb_hdr->cb_cmd = __constant_cpu_to_le16(CB_LOAD_FILTER);
+
+	/* Set EL and FIX bit */
+	(cmd->non_tx_cmd)->ntcb.filter.filter_data[0] =
+		__constant_cpu_to_le32(CB_FILTER_EL | CB_FILTER_FIX);
+
+	if (bdp->wolopts & WAKE_UCAST) {
+		(cmd->non_tx_cmd)->ntcb.filter.filter_data[0] |=
+			__constant_cpu_to_le32(CB_FILTER_IA_MATCH);
+	}
+
+	if (bdp->wolopts & WAKE_ARP) {
+		/* Setup ARP bit and lower IP parts */
+		/* bdp->ip_lbytes contains 2 lower bytes of IP address in network byte order */
+		(cmd->non_tx_cmd)->ntcb.filter.filter_data[0] |=
+			cpu_to_le32(CB_FILTER_ARP | bdp->ip_lbytes);
+	}
+
+	res = e100_exec_non_cu_cmd(bdp, cmd);
+	if (!res)
+		printk(KERN_WARNING "e100: %s: Filter setup failed\n",
+		       bdp->device->name);
+
+exit:
+	return res;
+
+}
+
+/* Apply the Wake-on-LAN configuration, setting up the wake packet filter
+ * when unicast or ARP wake-ups were requested. */
+static void
+e100_do_wol(struct pci_dev *pcid, struct e100_private *bdp)
+{
+	e100_config_wol(bdp);
+
+	if (!e100_config(bdp)) {
+		printk(KERN_ERR "e100: config WOL failed\n");
+		return;
+	}
+
+	if ((bdp->wolopts & (WAKE_UCAST | WAKE_ARP))
+	    && !e100_setup_filter(bdp))
+		printk(KERN_ERR
+		       "e100: WOL options failed\n");
+}
+#endif
+
+#if 0
+/*
+ * e100_get_ip_lbytes - return the two low bytes of the interface's first
+ * IPv4 address, in network byte order, or 0 when no address is bound.
+ * NOTE(review): __constant_ntohl/__constant_htons are applied to runtime
+ * values here; they still byteswap, but plain ntohl/htons would be the
+ * conventional spelling -- confirm before changing.
+ */
+static u16
+e100_get_ip_lbytes(struct net_device *dev)
+{
+	struct in_ifaddr *ifa;
+	struct in_device *in_dev;
+	u32 res = 0;
+
+	in_dev = (struct in_device *) dev->ip_ptr;
+	/* Check if any in_device bound to interface */
+	if (in_dev) {
+		/* Check if any IP address is bound to interface */
+		if ((ifa = in_dev->ifa_list) != NULL) {
+			res = __constant_ntohl(ifa->ifa_address);
+			res = __constant_htons(res & 0x0000ffff);
+		}
+	}
+	/* u32 intermediate is truncated to the u16 return value */
+	return res;
+}
+
+/* ETHTOOL_GWOL / ETHTOOL_SWOL handler: report or update the adapter's
+ * Wake-on-LAN option bits. */
+static int
+e100_ethtool_wol(struct net_device *dev, struct ifreq *ifr)
+{
+	struct e100_private *bdp;
+	struct ethtool_wolinfo wolinfo;
+	int res = 0;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	bdp = dev->priv;
+
+	if (copy_from_user(&wolinfo, ifr->ifr_data, sizeof (wolinfo)))
+		return -EFAULT;
+
+	switch (wolinfo.cmd) {
+	case ETHTOOL_GWOL:
+		wolinfo.supported = bdp->wolsupported;
+		wolinfo.wolopts = bdp->wolopts;
+		if (copy_to_user(ifr->ifr_data, &wolinfo, sizeof (wolinfo)))
+			res = -EFAULT;
+		break;
+	case ETHTOOL_SWOL:
+		/* accept only when every requested bit is supported,
+		 * or when the request disables WoL entirely */
+		if ((wolinfo.wolopts == 0)
+		    || ((wolinfo.wolopts & bdp->wolsupported) == wolinfo.wolopts))
+			bdp->wolopts = wolinfo.wolopts;
+		else
+			res = -EOPNOTSUPP;
+		if (wolinfo.wolopts & WAKE_ARP)
+			bdp->ip_lbytes = e100_get_ip_lbytes(dev);
+		break;
+	default:
+		break;
+	}
+	return res;
+}
+
+/*
+ * e100_ethtool_gstrings - ETHTOOL_GSTRINGS handler: copy either the
+ * self-test result names (ETH_SS_TEST, padded to ETH_GSTRING_LEN) or the
+ * statistics names (ETH_SS_STATS) to user space.
+ */
+static int e100_ethtool_gstrings(struct net_device *dev, struct ifreq *ifr)
+{
+	struct ethtool_gstrings info;
+	char *strings = NULL;
+	char *usr_strings;
+	int i;
+
+	memset((void *) &info, 0, sizeof(info));
+
+	/* user's string area starts right after the request header */
+	usr_strings = (u8 *) (ifr->ifr_data +
+			      offsetof(struct ethtool_gstrings, data));
+
+	if (copy_from_user(&info, ifr->ifr_data, sizeof (info)))
+		return -EFAULT;
+
+	switch (info.string_set) {
+	case ETH_SS_TEST: {
+		int ret = 0;
+		if (info.len > E100_MAX_TEST_RES)
+			info.len = E100_MAX_TEST_RES;
+		strings = kmalloc(info.len * ETH_GSTRING_LEN, GFP_ATOMIC);
+		if (!strings)
+			return -ENOMEM;
+		memset(strings, 0, info.len * ETH_GSTRING_LEN);
+
+		for (i = 0; i < info.len; i++) {
+			sprintf(strings + i * ETH_GSTRING_LEN, "%-31s",
+				test_strings[i]);
+		}
+		if (copy_to_user(ifr->ifr_data, &info, sizeof (info)))
+			ret = -EFAULT;
+		if (copy_to_user(usr_strings, strings, info.len * ETH_GSTRING_LEN))
+			ret = -EFAULT;
+		kfree(strings);
+		return ret;
+	}
+	case ETH_SS_STATS: {
+		/* NOTE(review): this inner "strings" shadows the outer one;
+		 * harmless here since the outer is never used afterwards */
+		char *strings = NULL;
+		void *addr = ifr->ifr_data;
+		info.len = E100_STATS_LEN;
+		strings = *e100_gstrings_stats;
+		if(copy_to_user(ifr->ifr_data, &info, sizeof(info)))
+			return -EFAULT;
+		addr += offsetof(struct ethtool_gstrings, data);
+		if(copy_to_user(addr, strings,
+				info.len * ETH_GSTRING_LEN))
+			return -EFAULT;
+		return 0;
+	}
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+/*
+ * e100_mii_ioctl - SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG handler.
+ * Writes to MII register 0 (BMCR) are only honoured when they map onto
+ * one of the driver's known speed/duplex modes or restart autoneg;
+ * anything else is rejected with -EINVAL.
+ */
+static int
+e100_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct e100_private *bdp;
+	struct mii_ioctl_data *data_ptr =
+		(struct mii_ioctl_data *) &(ifr->ifr_data);
+
+	bdp = dev->priv;
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+		data_ptr->phy_id = bdp->phy_addr & 0x1f;
+		break;
+
+	case SIOCGMIIREG:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		e100_mdi_read(bdp, data_ptr->reg_num & 0x1f, bdp->phy_addr,
+			      &(data_ptr->val_out));
+		break;
+
+	case SIOCSMIIREG:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		/* If reg = 0 && change speed/duplex */
+		if (data_ptr->reg_num == 0 &&
+		    (data_ptr->val_in == (BMCR_ANENABLE | BMCR_ANRESTART) /* restart cmd */
+		     || data_ptr->val_in == (BMCR_RESET) /* reset cmd */
+		     || data_ptr->val_in & (BMCR_SPEED100 | BMCR_FULLDPLX)
+		     || data_ptr->val_in == 0)) {
+			/* map the BMCR bits onto the driver's forced modes */
+			if (data_ptr->val_in == (BMCR_ANENABLE | BMCR_ANRESTART)
+			    || data_ptr->val_in == (BMCR_RESET))
+				bdp->params.e100_speed_duplex = E100_AUTONEG;
+			else if (data_ptr->val_in == (BMCR_SPEED100 | BMCR_FULLDPLX))
+				bdp->params.e100_speed_duplex = E100_SPEED_100_FULL;
+			else if (data_ptr->val_in == (BMCR_SPEED100))
+				bdp->params.e100_speed_duplex = E100_SPEED_100_HALF;
+			else if (data_ptr->val_in == (BMCR_FULLDPLX))
+				bdp->params.e100_speed_duplex = E100_SPEED_10_FULL;
+			else
+				bdp->params.e100_speed_duplex = E100_SPEED_10_HALF;
+			e100_set_speed_duplex(bdp);
+		}
+		else
+			/* Only allows changing speed/duplex */
+			return -EINVAL;
+
+		break;
+
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+#endif
+
+/* Allocate a non-transmit command entry plus its DMA-coherent command
+ * block.  Returns NULL when either allocation fails. */
+nxmit_cb_entry_t *
+e100_alloc_non_tx_cmd(struct e100_private *bdp)
+{
+	nxmit_cb_entry_t *entry;
+
+	entry = kmalloc(sizeof (nxmit_cb_entry_t), GFP_ATOMIC);
+	if (entry == NULL)
+		return NULL;
+
+	entry->non_tx_cmd = pci_alloc_consistent(bdp->pdev,
+						 sizeof (nxmit_cb_t),
+						 &(entry->dma_addr));
+	if (entry->non_tx_cmd == NULL) {
+		kfree(entry);
+		return NULL;
+	}
+
+	return entry;
+}
+
+/* Release a non-transmit command entry: its DMA-coherent block first,
+ * then the entry itself. */
+void
+e100_free_non_tx_cmd(struct e100_private *bdp,
+		     nxmit_cb_entry_t *non_tx_cmd_elem)
+{
+	pci_free_consistent(bdp->pdev, sizeof (nxmit_cb_t),
+			    non_tx_cmd_elem->non_tx_cmd,
+			    non_tx_cmd_elem->dma_addr);
+	kfree(non_tx_cmd_elem);
+}
+
+/* Drain and free every queued non-transmit command, then clear the
+ * per-command-type dedup table. */
+static void
+e100_free_nontx_list(struct e100_private *bdp)
+{
+	int i;
+
+	while (!list_empty(&bdp->non_tx_cmd_list)) {
+		nxmit_cb_entry_t *entry =
+			list_entry(bdp->non_tx_cmd_list.next,
+				   nxmit_cb_entry_t, list_elem);
+		list_del(&(entry->list_elem));
+		e100_free_non_tx_cmd(bdp, entry);
+	}
+
+	for (i = 0; i < CB_MAX_NONTX_CMD; i++)
+		bdp->same_cmd_entry[i] = NULL;
+}
+
+/*
+ * e100_delayed_exec_non_cu_cmd - queue a non-transmit command for later
+ * execution by e100_non_tx_background().  If a command of the same type
+ * is already pending its contents are simply overwritten (coalesced)
+ * instead of queuing a duplicate.  Runs under bd_non_tx_lock.
+ * Always returns true.
+ */
+static unsigned char
+e100_delayed_exec_non_cu_cmd(struct e100_private *bdp,
+			     nxmit_cb_entry_t *command)
+{
+	nxmit_cb_entry_t *same_command;
+	cb_header_t *ntcb_hdr;
+	u16 cmd;
+
+	ntcb_hdr = (cb_header_t *) command->non_tx_cmd;
+
+	cmd = CB_CMD_MASK & le16_to_cpu(ntcb_hdr->cb_cmd);
+
+	spin_lock_bh(&(bdp->bd_non_tx_lock));
+
+	same_command = bdp->same_cmd_entry[cmd];
+
+	if (same_command != NULL) {
+		/* coalesce: overwrite the already-queued command block */
+		memcpy((void *) (same_command->non_tx_cmd),
+		       (void *) (command->non_tx_cmd), sizeof (nxmit_cb_t));
+		e100_free_non_tx_cmd(bdp, command);
+	} else {
+		list_add_tail(&(command->list_elem), &(bdp->non_tx_cmd_list));
+		bdp->same_cmd_entry[cmd] = command;
+	}
+
+	/* kick the background state machine if it is idle */
+	if (bdp->non_tx_command_state == E100_NON_TX_IDLE) {
+		bdp->non_tx_command_state = E100_WAIT_TX_FINISH;
+		mod_timer(&(bdp->nontx_timer_id), jiffies + 1);
+	}
+
+	spin_unlock_bh(&(bdp->bd_non_tx_lock));
+	return true;
+}
+
+/*
+ * e100_non_tx_background - timer-driven state machine that executes the
+ * queued non-transmit commands one at a time once the CU has finished
+ * transmitting.  States:
+ *   E100_WAIT_TX_FINISH     - wait for the last TCB and the CU to go idle
+ *   E100_WAIT_NON_TX_FINISH - wait for the current command (1s timeout)
+ * Re-arms itself every jiffy until the queue drains, then wakes the
+ * netdev queue.  Runs under bd_non_tx_lock.
+ */
+static void
+e100_non_tx_background(unsigned long ptr)
+{
+	struct e100_private *bdp = (struct e100_private *) ptr;
+	nxmit_cb_entry_t *active_command;
+	int restart = true;
+	cb_header_t *non_tx_cmd;
+	u8 sub_cmd;
+
+	spin_lock_bh(&(bdp->bd_non_tx_lock));
+
+	switch (bdp->non_tx_command_state) {
+	case E100_WAIT_TX_FINISH:
+		if (bdp->last_tcb != NULL) {
+			rmb();
+			if ((bdp->last_tcb->tcb_hdr.cb_status &
+			     __constant_cpu_to_le16(CB_STATUS_COMPLETE)) == 0)
+				goto exit;
+		}
+		if ((readw(&bdp->scb->scb_status) & SCB_CUS_MASK) ==
+		    SCB_CUS_ACTIVE) {
+			goto exit;
+		}
+		break;
+
+	case E100_WAIT_NON_TX_FINISH:
+		active_command = list_entry(bdp->non_tx_cmd_list.next,
+					    nxmit_cb_entry_t, list_elem);
+		rmb();
+
+		/* keep waiting while incomplete and not yet timed out */
+		if (((((cb_header_t *) (active_command->non_tx_cmd))->cb_status
+		      & __constant_cpu_to_le16(CB_STATUS_COMPLETE)) == 0)
+		    && time_before(jiffies, active_command->expiration_time)) {
+			goto exit;
+		} else {
+			non_tx_cmd = (cb_header_t *) active_command->non_tx_cmd;
+			sub_cmd = CB_CMD_MASK & le16_to_cpu(non_tx_cmd->cb_cmd);
+#ifdef E100_CU_DEBUG
+			if (!(non_tx_cmd->cb_status
+			      & __constant_cpu_to_le16(CB_STATUS_COMPLETE)))
+				printk(KERN_ERR "e100: %s: Queued "
+				       "command (%x) timeout\n",
+				       bdp->device->name, sub_cmd);
+#endif
+			list_del(&(active_command->list_elem));
+			e100_free_non_tx_cmd(bdp, active_command);
+		}
+		break;
+
+	default:
+		break;
+	} //switch
+
+	if (list_empty(&bdp->non_tx_cmd_list)) {
+		/* queue drained: back to idle, let the CU be restarted */
+		bdp->non_tx_command_state = E100_NON_TX_IDLE;
+		spin_lock_irq(&(bdp->bd_lock));
+		bdp->next_cu_cmd = START_WAIT;
+		spin_unlock_irq(&(bdp->bd_lock));
+		restart = false;
+		goto exit;
+	} else {
+		u16 cmd_type;
+
+		/* launch the next queued command on the CU */
+		bdp->non_tx_command_state = E100_WAIT_NON_TX_FINISH;
+		active_command = list_entry(bdp->non_tx_cmd_list.next,
+					    nxmit_cb_entry_t, list_elem);
+		/* NOTE(review): unlike the path above, this read has no
+		 * le16_to_cpu/CB_CMD_MASK -- confirm intent on BE systems */
+		sub_cmd = ((cb_header_t *) active_command->non_tx_cmd)->cb_cmd;
+		spin_lock_irq(&(bdp->bd_lock));
+		e100_wait_exec_cmplx(bdp, active_command->dma_addr,
+				     SCB_CUC_START, sub_cmd);
+		spin_unlock_irq(&(bdp->bd_lock));
+		active_command->expiration_time = jiffies + HZ;
+		cmd_type = CB_CMD_MASK &
+			le16_to_cpu(((cb_header_t *)
+				     (active_command->non_tx_cmd))->cb_cmd);
+		bdp->same_cmd_entry[cmd_type] = NULL;
+	}
+
+exit:
+	if (restart) {
+		mod_timer(&(bdp->nontx_timer_id), jiffies + 1);
+	} else {
+		if (netif_running(bdp->device))
+			netif_wake_queue(bdp->device);
+	}
+	spin_unlock_bh(&(bdp->bd_non_tx_lock));
+}
+
+/* Attach/detach a VLAN group; VLAN tag insert/strip is enabled exactly
+ * when a group is attached.  Interrupts are masked across the reconfig. */
+static void
+e100_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
+{
+	struct e100_private *bdp = netdev->priv;
+
+	e100_disable_clear_intr(bdp);
+	bdp->vlgrp = grp;
+
+	e100_config_vlan_drop(bdp, grp != NULL);
+
+	e100_config(bdp);
+	e100_set_intr_mask(bdp);
+}
+
+static void
+e100_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+	/* This driver performs no VLAN filtering: nothing to register. */
+}
+
+static void
+e100_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+	struct e100_private *bdp = netdev->priv;
+
+	/* No VLAN filtering; just drop the group's device entry, if any. */
+	if (bdp->vlgrp != NULL)
+		bdp->vlgrp->vlan_devices[vid] = NULL;
+}
+
+#ifdef CONFIG_PM
+/*
+ * e100_notify_reboot - reboot notifier: on shutdown/halt/power-off,
+ * suspend (to D3) every e100 device that has been probed, so WoL
+ * settings take effect.
+ */
+static int
+e100_notify_reboot(struct notifier_block *nb, unsigned long event, void *p)
+{
+	struct pci_dev *pdev;
+
+	switch(event) {
+	case SYS_DOWN:
+	case SYS_HALT:
+	case SYS_POWER_OFF:
+		pci_for_each_dev(pdev) {
+			if(pci_dev_driver(pdev) == &e100_driver) {
+				/* If net_device struct is allocated? */
+				if (pci_get_drvdata(pdev))
+					e100_suspend(pdev, 3);
+
+			}
+		}
+	}
+	return NOTIFY_DONE;
+}
+
+/*
+ * e100_suspend - PM suspend handler.  Quiesces the driver, saves PCI
+ * state and applies the WoL configuration; enters D3 with PME enabled
+ * when wake-up is configured (or ASF is active), otherwise just disables
+ * the device and enters the requested power state.
+ */
+static int
+e100_suspend(struct pci_dev *pcid, u32 state)
+{
+	struct net_device *netdev = pci_get_drvdata(pcid);
+	struct e100_private *bdp = netdev->priv;
+
+	e100_isolate_driver(bdp);
+	pci_save_state(pcid, bdp->pci_state);
+
+	/* Enable or disable WoL */
+	e100_do_wol(pcid, bdp);
+
+	/* If wol is enabled */
+	if (bdp->wolopts || e100_asf_enabled(bdp)) {
+		pci_enable_wake(pcid, 3, 1);	/* Enable PME for power state D3 */
+		pci_set_power_state(pcid, 3);	/* Set power state to D3. */
+	} else {
+		/* Disable bus mastering */
+		pci_disable_device(pcid);
+		pci_set_power_state(pcid, state);
+	}
+	return 0;
+}
+
+/*
+ * e100_resume - PM resume handler.  Returns to D0, clears PME, restores
+ * the saved PCI state and brings the driver back with a full reset
+ * (full_reset=true) since the device was in D3.
+ */
+static int
+e100_resume(struct pci_dev *pcid)
+{
+	struct net_device *netdev = pci_get_drvdata(pcid);
+	struct e100_private *bdp = netdev->priv;
+
+	pci_set_power_state(pcid, 0);
+	pci_enable_wake(pcid, 0, 0);	/* Clear PME status and disable PME */
+	pci_restore_state(pcid, bdp->pci_state);
+
+	/* Also do device full reset because device was in D3 state */
+	e100_deisolate_driver(bdp, true);
+
+	return 0;
+}
+#endif /* CONFIG_PM */
+
+/**
+ * e100_asf_enabled - checks if ASF is configured on the current adapter
+ * by reading EEPROM words 0xD (config/ASF) and 0x90 (SMBus address)
+ * @bdp: adapter's private data struct
+ *
+ * Returns: true if ASF is enabled
+ */
+static unsigned char
+e100_asf_enabled(struct e100_private *bdp)
+{
+	u16 asf_reg;
+
+	/* only devices in the 0x1050-0x1055 range are checked for ASF */
+	if ((bdp->pdev->device < 0x1050) || (bdp->pdev->device > 0x1055))
+		return false;
+
+	asf_reg = e100_eeprom_read(bdp, EEPROM_CONFIG_ASF);
+	if (!(asf_reg & EEPROM_FLAG_ASF) || (asf_reg & EEPROM_FLAG_GCL))
+		return false;
+
+	/* an SMBus address byte of 0xFE is treated as "not enabled" here */
+	return ((e100_eeprom_read(bdp, EEPROM_SMBUS_ADDR) & 0xFF) != 0xFE);
+}
+
+#ifdef E100_CU_DEBUG
+/* Debug helper: report whether the CU is active while executing an
+ * unrecognized command.  The SCB is an ioremap()ed MMIO mapping, so it
+ * is read through the same readb/readw accessors the rest of the driver
+ * uses (see e.g. e100_non_tx_background) instead of plain dereferences. */
+unsigned char
+e100_cu_unknown_state(struct e100_private *bdp)
+{
+	u8 scb_cmd_low;
+	u16 scb_status;
+
+	scb_cmd_low = readb(&bdp->scb->scb_cmd_low);
+	scb_status = readw(&bdp->scb->scb_status);
+	/* If CU is active and executing unknown cmd */
+	if (scb_status & SCB_CUS_ACTIVE && scb_cmd_low & SCB_CUC_UNKNOWN)
+		return true;
+	else
+		return false;
+}
+#endif
+
--- /dev/null
+/*******************************************************************************
+
+
+ Copyright(c) 1999 - 2003 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc., 59
+ Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ The full GNU General Public License is included in this distribution in the
+ file called LICENSE.
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+*******************************************************************************/
+
+#include "e100_phy.h"
+
+void e100_handle_zlock(struct e100_private *bdp);
+
+/*
+ * Procedure: e100_mdi_write
+ *
+ * Description: This routine will write a value to the specified MII register
+ * of an external MDI compliant device (e.g. PHY 100). The
+ * command will execute in polled mode.
+ *
+ * Arguments:
+ * bdp - Ptr to this card's e100_bdconfig structure
+ * reg_addr - The MII register that we are writing to
+ * phy_addr - The MDI address of the Phy component.
+ * data - The value that we are writing to the MII register.
+ *
+ * Returns:
+ * 0 - MDI_PHY_READY was observed before the retry budget ran out
+ * 1 - the write timed out (an error is logged)
+ */
+int
+e100_mdi_write(struct e100_private *bdp, u32 reg_addr, u32 phy_addr, u16 data)
+{
+ int e100_retry;
+ u32 temp_val;
+ unsigned int mdi_cntrl;
+
+ spin_lock_bh(&bdp->mdi_access_lock);
+ /* Compose the MDI command: data | register | PHY address | opcode */
+ temp_val = (((u32) data) | (reg_addr << 16) |
+ (phy_addr << 21) | (MDI_WRITE << 26));
+ writel(temp_val, &bdp->scb->scb_mdi_cntrl);
+ /* NOTE(review): status read-back presumably flushes the posted
+  * write before the delay -- confirm against 8255x manual */
+ readw(&bdp->scb->scb_status);
+
+ /* wait 20usec before checking status */
+ udelay(20);
+
+ /* poll for the mdi write to complete */
+ e100_retry = E100_CMD_WAIT;
+ while ((!((mdi_cntrl = readl(&bdp->scb->scb_mdi_cntrl)) & MDI_PHY_READY)) && (e100_retry)) {
+
+ udelay(20);
+ e100_retry--;
+ }
+ spin_unlock_bh(&bdp->mdi_access_lock);
+ if (mdi_cntrl & MDI_PHY_READY)
+ return 0;
+ else {
+ printk(KERN_ERR "e100: MDI write timeout\n");
+ return 1;
+ }
+}
+
+/*
+ * Procedure: e100_mdi_read
+ *
+ * Description: This routine will read a value from the specified MII register
+ * of an external MDI compliant device (e.g. PHY 100), and return
+ * it to the calling routine. The command will execute in polled
+ * mode.
+ *
+ * Arguments:
+ * bdp - Ptr to this card's e100_bdconfig structure
+ * reg_addr - The MII register that we are reading from
+ * phy_addr - The MDI address of the Phy component.
+ *
+ * Results:
+ * data - The value that we read from the MII register.
+ *
+ * Returns:
+ * 0 - read completed; *data holds the register value
+ * 1 - the read timed out (*data is not updated; an error is logged)
+ */
+int
+e100_mdi_read(struct e100_private *bdp, u32 reg_addr, u32 phy_addr, u16 *data)
+{
+ int e100_retry;
+ u32 temp_val;
+ unsigned int mdi_cntrl;
+
+ spin_lock_bh(&bdp->mdi_access_lock);
+ /* Issue the read command to the MDI control register. */
+ temp_val = ((reg_addr << 16) | (phy_addr << 21) | (MDI_READ << 26));
+ writel(temp_val, &bdp->scb->scb_mdi_cntrl);
+ readw(&bdp->scb->scb_status);
+
+ /* wait 20usec before checking status */
+ udelay(20);
+
+ /* poll for the mdi read to complete */
+ e100_retry = E100_CMD_WAIT;
+ while ((!((mdi_cntrl = readl(&bdp->scb->scb_mdi_cntrl)) & MDI_PHY_READY)) && (e100_retry)) {
+
+ udelay(20);
+ e100_retry--;
+ }
+
+ spin_unlock_bh(&bdp->mdi_access_lock);
+ if (mdi_cntrl & MDI_PHY_READY) {
+ /* return the lower word */
+ *data = (u16) mdi_cntrl;
+ return 0;
+ }
+ else {
+ printk(KERN_ERR "e100: MDI read timeout\n");
+ return 1;
+ }
+}
+
+/* Probe a PHY address: a responding PHY must not read back as all-ones
+ * control, nor as all-zero control with all-zero status. */
+static unsigned char __devinit
+e100_phy_valid(struct e100_private *bdp, unsigned int phy_address)
+{
+	u16 bmcr, bmsr;
+
+	/* Fetch the MII control register for this address */
+	e100_mdi_read(bdp, MII_BMCR, phy_address, &bmcr);
+
+	/* Status bits are sticky, so read the status register twice */
+	e100_mdi_read(bdp, MII_BMSR, phy_address, &bmsr);
+	e100_mdi_read(bdp, MII_BMSR, phy_address, &bmsr);
+
+	if (bmcr == 0xffff)
+		return false;
+	if ((bmsr == 0) && (bmcr == 0))
+		return false;
+
+	return true;
+}
+
+/* Locate the PHY's MDI address and store it in bdp->phy_addr.
+ * Falls back to PHY_ADDRESS_503 when no MII PHY answers. */
+static void __devinit
+e100_phy_address_detect(struct e100_private *bdp)
+{
+	unsigned int addr;
+
+	/* The NC3133 board always uses PHY address 0 */
+	if (IS_NC3133(bdp)) {
+		bdp->phy_addr = 0;
+		return;
+	}
+
+	/* Try the conventional default address first */
+	if (e100_phy_valid(bdp, PHY_DEFAULT_ADDRESS)) {
+		bdp->phy_addr = PHY_DEFAULT_ADDRESS;
+		return;
+	}
+
+	/* Otherwise scan the full MDI address range */
+	for (addr = MIN_PHY_ADDR; addr <= MAX_PHY_ADDR; addr++) {
+		if (e100_phy_valid(bdp, addr)) {
+			bdp->phy_addr = addr;
+			return;
+		}
+	}
+
+	/* Nothing answered: assume a 503 serial component */
+	bdp->phy_addr = PHY_ADDRESS_503;
+}
+
+/* Determine the 32-bit PHY identifier and store it in bdp->PhyId. */
+static void __devinit
+e100_phy_id_detect(struct e100_private *bdp)
+{
+	u16 id_low, id_high;
+
+	/* A 503 serial component has no MII ID registers to read */
+	if (bdp->phy_addr == PHY_ADDRESS_503) {
+		bdp->PhyId = PHY_503;
+		return;
+	}
+
+	/* Non-ICH parts at D102 silicon or later: report 82562ET */
+	if (!(bdp->flags & IS_ICH) && (bdp->rev_id >= D102_REV_ID)) {
+		bdp->PhyId = PHY_82562ET;
+		return;
+	}
+
+	/* Compose the ID from the two MII PHY-identifier registers */
+	e100_mdi_read(bdp, MII_PHYSID1, bdp->phy_addr, &id_low);
+	e100_mdi_read(bdp, MII_PHYSID2, bdp->phy_addr, &id_high);
+
+	bdp->PhyId = ((unsigned int) id_low |
+		      ((unsigned int) id_high << 16));
+}
+
+/* Electrically isolate every PHY address except the selected one,
+ * whose isolate bit is cleared instead. */
+static void __devinit
+e100_phy_isolate(struct e100_private *bdp)
+{
+	unsigned int addr;
+	u16 bmcr;
+
+	for (addr = 0; addr <= MAX_PHY_ADDR; addr++) {
+		if (addr == bdp->phy_addr) {
+			/* De-isolate the PHY we chose to use */
+			e100_mdi_read(bdp, MII_BMCR, bdp->phy_addr, &bmcr);
+			bmcr &= ~BMCR_ISOLATE;
+			e100_mdi_write(bdp, MII_BMCR, bdp->phy_addr, bmcr);
+		} else {
+			/* Isolate any other device at this address */
+			e100_mdi_write(bdp, MII_BMCR, addr, BMCR_ISOLATE);
+		}
+
+		udelay(100);
+	}
+}
+
+/*
+ * Procedure: e100_phy_specific_setup
+ *
+ * Description: Apply per-component fixups after PHY detection:
+ * - 503 serial component: cannot autoneg, so force 10/HALF; reject
+ *   a user-forced 100 Mbps setting.
+ * - NC3133: enable the 100BASE-FX fiber interface and PHY interrupts,
+ *   and force 100/FULL (the only mode the card supports).
+ * - National TX PHY: disable its congestion-control bit.
+ *
+ * Returns: false if a user-forced speed/duplex cannot be honored,
+ * true otherwise.
+ */
+static unsigned char __devinit
+e100_phy_specific_setup(struct e100_private *bdp)
+{
+ u16 misc_reg;
+
+ if (bdp->phy_addr == PHY_ADDRESS_503) {
+ switch (bdp->params.e100_speed_duplex) {
+ case E100_AUTONEG:
+ /* The adapter can't autoneg. so set to 10/HALF */
+ printk(KERN_INFO
+        "e100: 503 serial component detected which "
+        "cannot autonegotiate\n");
+ printk(KERN_INFO
+        "e100: speed/duplex forced to "
+        "10Mbps / Half duplex\n");
+ bdp->params.e100_speed_duplex = E100_SPEED_10_HALF;
+ break;
+
+ case E100_SPEED_100_HALF:
+ case E100_SPEED_100_FULL:
+ printk(KERN_ERR
+        "e100: 503 serial component detected "
+        "which does not support 100Mbps\n");
+ printk(KERN_ERR
+        "e100: Change the forced speed/duplex "
+        "to a supported setting\n");
+ return false;
+ }
+
+ return true;
+ }
+
+ if (IS_NC3133(bdp)) {
+ u16 int_reg;
+
+ /* enable 100BASE fiber interface */
+ e100_mdi_write(bdp, MDI_NC3133_CONFIG_REG, bdp->phy_addr,
+        MDI_NC3133_100FX_ENABLE);
+
+ if ((bdp->params.e100_speed_duplex != E100_AUTONEG) &&
+     (bdp->params.e100_speed_duplex != E100_SPEED_100_FULL)) {
+ /* just inform user about 100 full */
+ printk(KERN_ERR "e100: NC3133 NIC can only run "
+        "at 100Mbps full duplex\n");
+ }
+
+ bdp->params.e100_speed_duplex = E100_SPEED_100_FULL;
+
+ /* enable interrupts */
+ e100_mdi_read(bdp, MDI_NC3133_INT_ENABLE_REG,
+       bdp->phy_addr, &int_reg);
+ int_reg |= MDI_NC3133_INT_ENABLE;
+ e100_mdi_write(bdp, MDI_NC3133_INT_ENABLE_REG,
+        bdp->phy_addr, int_reg);
+ }
+
+ /* Handle the National TX */
+ if ((bdp->PhyId & PHY_MODEL_REV_ID_MASK) == PHY_NSC_TX) {
+ e100_mdi_read(bdp, NSC_CONG_CONTROL_REG,
+       bdp->phy_addr, &misc_reg);
+
+ misc_reg |= NSC_TX_CONG_TXREADY;
+
+ /* disable the congestion control bit in the National Phy */
+ misc_reg &= ~NSC_TX_CONG_ENABLE;
+
+ e100_mdi_write(bdp, NSC_CONG_CONTROL_REG,
+        bdp->phy_addr, misc_reg);
+ }
+
+ return true;
+}
+
+/*
+ * Procedure: e100_phy_fix_squelch
+ *
+ * Description:
+ * Help find link on certain rare scenarios.
+ * Applies only to the 82555 TX PHY when speed/duplex is not forced.
+ * While link is down it steps through a three-state squelch/equalizer
+ * workaround (one step per watchdog tick, paced by PhyDelay) and
+ * restarts autonegotiation after each step; once link returns, the
+ * workaround registers are backed out and the state is reset.
+ * NOTE: This routine must be called once per watchdog,
+ * and *after* setting the current link state.
+ *
+ * Arguments:
+ * bdp - Ptr to this card's e100_bdconfig structure
+ *
+ * Returns:
+ * NOTHING
+ */
+static void
+e100_phy_fix_squelch(struct e100_private *bdp)
+{
+ if ((bdp->PhyId != PHY_82555_TX) || (bdp->flags & DF_SPEED_FORCED))
+ return;
+
+ if (netif_carrier_ok(bdp->device)) {
+ /* Link is up: undo whichever workaround step was applied */
+ switch (bdp->PhyState) {
+ case 0:
+ break;
+ case 1:
+ e100_mdi_write(bdp, PHY_82555_SPECIAL_CONTROL,
+        bdp->phy_addr, 0x0000);
+ break;
+ case 2:
+ e100_mdi_write(bdp, PHY_82555_MDI_EQUALIZER_CSR,
+        bdp->phy_addr, 0x3000);
+ break;
+ }
+ bdp->PhyState = 0;
+ bdp->PhyDelay = 0;
+
+ } else if (!bdp->PhyDelay--) {
+ /* Link still down and the inter-step delay expired: advance
+  * the workaround state machine by one step */
+ switch (bdp->PhyState) {
+ case 0:
+ e100_mdi_write(bdp, PHY_82555_SPECIAL_CONTROL,
+        bdp->phy_addr, EXTENDED_SQUELCH_BIT);
+ bdp->PhyState = 1;
+ break;
+ case 1:
+ e100_mdi_write(bdp, PHY_82555_SPECIAL_CONTROL,
+        bdp->phy_addr, 0x0000);
+ e100_mdi_write(bdp, PHY_82555_MDI_EQUALIZER_CSR,
+        bdp->phy_addr, 0x2010);
+ bdp->PhyState = 2;
+ break;
+ case 2:
+ e100_mdi_write(bdp, PHY_82555_MDI_EQUALIZER_CSR,
+        bdp->phy_addr, 0x3000);
+ bdp->PhyState = 0;
+ break;
+ }
+
+ /* Restart autoneg and wait 3 watchdog ticks before next step */
+ e100_mdi_write(bdp, MII_BMCR, bdp->phy_addr,
+        BMCR_ANENABLE | BMCR_ANRESTART);
+ bdp->PhyDelay = 3;
+ }
+}
+
+/*
+ * Procedure: e100_fix_polarity
+ *
+ * Description:
+ * Fix for 82555 auto-polarity toggle problem. With a short cable
+ * connecting an 82555 with an 840A link partner, if the medium is noisy,
+ * the 82555 sometime thinks that the polarity might be wrong and so
+ * toggles polarity. This happens repeatedly and results in a high bit
+ * error rate.
+ * NOTE: This happens only at 10 Mbps
+ *
+ * Arguments:
+ * bdp - Ptr to this card's e100_bdconfig structure
+ *
+ * Returns:
+ * NOTHING
+ */
+static void __devinit
+e100_fix_polarity(struct e100_private *bdp)
+{
+ u16 status;
+ u16 errors;
+ u16 misc_reg;
+ int speed;
+
+ /* Only the 82555/82562ET/82562EM PHYs exhibit the problem */
+ if ((bdp->PhyId != PHY_82555_TX) && (bdp->PhyId != PHY_82562ET) &&
+     (bdp->PhyId != PHY_82562EM))
+ return;
+
+ /* If the user wants auto-polarity disabled, do only that and nothing
+  * else.
+  * e100_autopolarity == 0 means disable --- we do just the disabling
+  * e100_autopolarity == 1 means enable --- we do nothing at all
+  * e100_autopolarity >= 2 means we do the workaround code. */
+ /* Change for 82558 enhancement */
+ switch (E100_AUTOPOLARITY) {
+ case 0:
+ e100_mdi_read(bdp, PHY_82555_SPECIAL_CONTROL,
+       bdp->phy_addr, &misc_reg);
+ e100_mdi_write(bdp, PHY_82555_SPECIAL_CONTROL, bdp->phy_addr,
+        (u16) (misc_reg | DISABLE_AUTO_POLARITY));
+ break;
+
+ case 1:
+ e100_mdi_read(bdp, PHY_82555_SPECIAL_CONTROL,
+       bdp->phy_addr, &misc_reg);
+ e100_mdi_write(bdp, PHY_82555_SPECIAL_CONTROL, bdp->phy_addr,
+        (u16) (misc_reg & ~DISABLE_AUTO_POLARITY));
+ break;
+
+ case 2:
+ /* we do this only if link is up */
+ if (!netif_carrier_ok(bdp->device)) {
+ break;
+ }
+
+ e100_mdi_read(bdp, PHY_82555_CSR, bdp->phy_addr, &status);
+ speed = (status & PHY_82555_SPEED_BIT) ? 100 : 10;
+
+ /* we need to do this only if speed is 10 */
+ if (speed != 10) {
+ break;
+ }
+
+ /* see if we have any end of frame errors */
+ e100_mdi_read(bdp, PHY_82555_EOF_COUNTER,
+       bdp->phy_addr, &errors);
+
+ /* if non-zero, delay before reading again
+  * NOTE(review): the original comment claimed a 100 ms wait but
+  * the code delays only 200 usec (udelay(200)) -- confirm the
+  * intended interval before changing either */
+ if (errors) {
+ udelay(200);
+ e100_mdi_read(bdp, PHY_82555_EOF_COUNTER,
+       bdp->phy_addr, &errors);
+
+ /* if non-zero again, we disable polarity */
+ if (errors) {
+ e100_mdi_read(bdp, PHY_82555_SPECIAL_CONTROL,
+       bdp->phy_addr, &misc_reg);
+ e100_mdi_write(bdp, PHY_82555_SPECIAL_CONTROL,
+        bdp->phy_addr,
+        (u16) (misc_reg |
+       DISABLE_AUTO_POLARITY));
+ }
+ }
+
+ if (!errors) {
+ /* it is safe to read the polarity now */
+ e100_mdi_read(bdp, PHY_82555_CSR,
+       bdp->phy_addr, &status);
+
+ /* if polarity is normal, disable polarity */
+ if (!(status & PHY_82555_POLARITY_BIT)) {
+ e100_mdi_read(bdp, PHY_82555_SPECIAL_CONTROL,
+       bdp->phy_addr, &misc_reg);
+ e100_mdi_write(bdp, PHY_82555_SPECIAL_CONTROL,
+        bdp->phy_addr,
+        (u16) (misc_reg |
+       DISABLE_AUTO_POLARITY));
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+/*
+ * Procedure: e100_find_speed_duplex
+ *
+ * Description: This routine will figure out what line speed and duplex mode
+ * the PHY is currently using. Results are stored in
+ * bdp->cur_line_speed (0, 10 or 100) and bdp->cur_dplx_mode.
+ *
+ * Arguments:
+ * bdp - Ptr to this card's e100_bdconfig structure
+ *
+ * Returns:
+ * NOTHING
+ */
+static void
+e100_find_speed_duplex(struct e100_private *bdp)
+{
+ unsigned int PhyId;
+ u16 stat_reg, misc_reg;
+ u16 ad_reg, lp_ad_reg;
+
+ PhyId = bdp->PhyId & PHY_MODEL_REV_ID_MASK;
+
+ /* First we should check to see if we have link */
+ /* If we don't have a link no reason to print a speed and duplex */
+ if (!e100_update_link_state(bdp)) {
+ bdp->cur_line_speed = 0;
+ bdp->cur_dplx_mode = 0;
+ return;
+ }
+
+ /* On the 82559 and later controllers, speed/duplex is part of the *
+  * SCB. So, we save an mdi_read and get these from the SCB. * */
+ if (bdp->rev_id >= D101MA_REV_ID) {
+ /* Read speed */
+ if (readb(&bdp->scb->scb_ext.d101m_scb.scb_gen_stat) & BIT_1)
+ bdp->cur_line_speed = 100;
+ else
+ bdp->cur_line_speed = 10;
+
+ /* Read duplex */
+ if (readb(&bdp->scb->scb_ext.d101m_scb.scb_gen_stat) & BIT_2)
+ bdp->cur_dplx_mode = FULL_DUPLEX;
+ else
+ bdp->cur_dplx_mode = HALF_DUPLEX;
+
+ return;
+ }
+
+ /* If this is a Phy 100, then read bits 1 and 0 of extended register 0,
+  * to get the current speed and duplex settings. */
+ if ((PhyId == PHY_100_A) || (PhyId == PHY_100_C) ||
+     (PhyId == PHY_82555_TX)) {
+
+ /* Read Phy 100 extended register 0 */
+ e100_mdi_read(bdp, EXTENDED_REG_0, bdp->phy_addr, &misc_reg);
+
+ /* Get current speed setting */
+ if (misc_reg & PHY_100_ER0_SPEED_INDIC)
+ bdp->cur_line_speed = 100;
+ else
+ bdp->cur_line_speed = 10;
+
+ /* Get current duplex setting -- FDX enabled if bit is set */
+ if (misc_reg & PHY_100_ER0_FDX_INDIC)
+ bdp->cur_dplx_mode = FULL_DUPLEX;
+ else
+ bdp->cur_dplx_mode = HALF_DUPLEX;
+
+ return;
+ }
+
+ /* See if link partner is capable of Auto-Negotiation (bit 0, reg 6) */
+ e100_mdi_read(bdp, MII_EXPANSION, bdp->phy_addr, &misc_reg);
+
+ /* See if Auto-Negotiation was complete (bit 5, reg 1) */
+ e100_mdi_read(bdp, MII_BMSR, bdp->phy_addr, &stat_reg);
+
+ /* If a True NWAY connection was made, then we can detect speed/dplx
+  * by ANDing our adapter's advertised abilities with our link partner's
+  * advertised abilities, and then assuming that the highest common
+  * denominator was chosen by NWAY. */
+ if ((misc_reg & EXPANSION_NWAY) && (stat_reg & BMSR_ANEGCOMPLETE)) {
+
+ /* Read our advertisement register */
+ e100_mdi_read(bdp, MII_ADVERTISE, bdp->phy_addr, &ad_reg);
+
+ /* Read our link partner's advertisement register */
+ e100_mdi_read(bdp, MII_LPA, bdp->phy_addr, &lp_ad_reg);
+
+ /* AND the two advertisement registers together, and get rid
+  * of any extraneous bits. */
+ ad_reg &= (lp_ad_reg & NWAY_LP_ABILITY);
+
+ /* Get speed setting */
+ if (ad_reg &
+     (ADVERTISE_100HALF | ADVERTISE_100FULL |
+      ADVERTISE_100BASE4))
+
+ bdp->cur_line_speed = 100;
+ else
+ bdp->cur_line_speed = 10;
+
+ /* Get duplex setting -- use priority resolution algorithm */
+ if (ad_reg & ADVERTISE_100BASE4) {
+ bdp->cur_dplx_mode = HALF_DUPLEX;
+ } else if (ad_reg & ADVERTISE_100FULL) {
+ bdp->cur_dplx_mode = FULL_DUPLEX;
+ } else if (ad_reg & ADVERTISE_100HALF) {
+ bdp->cur_dplx_mode = HALF_DUPLEX;
+ } else if (ad_reg & ADVERTISE_10FULL) {
+ bdp->cur_dplx_mode = FULL_DUPLEX;
+ } else {
+ bdp->cur_dplx_mode = HALF_DUPLEX;
+ }
+
+ return;
+ }
+
+ /* If we are connected to a dumb (non-NWAY) repeater or hub, and the
+  * line speed was determined automatically by parallel detection, then
+  * we have no way of knowing exactly what speed the PHY is set to
+  * unless that PHY has a proprietary register which indicates speed in
+  * this situation. The NSC TX PHY does have such a register. Also,
+  * since NWAY didn't establish the connection, the duplex setting
+  * should be HALF duplex. */
+ bdp->cur_dplx_mode = HALF_DUPLEX;
+
+ if (PhyId == PHY_NSC_TX) {
+ /* Read register 25 to get the SPEED_10 bit */
+ e100_mdi_read(bdp, NSC_SPEED_IND_REG, bdp->phy_addr, &misc_reg);
+
+ /* If bit 6 was set then we're at 10Mbps */
+ if (misc_reg & NSC_TX_SPD_INDC_SPEED)
+ bdp->cur_line_speed = 10;
+ else
+ bdp->cur_line_speed = 100;
+
+ } else {
+ /* If we don't know the line speed, default to 10Mbps */
+ bdp->cur_line_speed = 10;
+ }
+}
+
+/*
+ * Procedure: e100_force_speed_duplex
+ *
+ * Description: This routine forces line speed and duplex mode of the
+ * adapter based on the values the user has set in e100.c.
+ * Resets the PHY first, disables autoneg/loopback in BMCR, then
+ * busy-waits (yielding) up to ~2 seconds for link to come back.
+ *
+ * Arguments: bdp - Pointer to the e100_private structure for the board
+ *
+ * Returns: void
+ *
+ */
+void
+e100_force_speed_duplex(struct e100_private *bdp)
+{
+ u16 control;
+ unsigned long expires;
+
+ e100_phy_reset(bdp);
+
+ /* mark the link as forced so watchdog workarounds stay off */
+ bdp->flags |= DF_SPEED_FORCED;
+
+ e100_mdi_read(bdp, MII_BMCR, bdp->phy_addr, &control);
+ control &= ~BMCR_ANENABLE;
+ control &= ~BMCR_LOOPBACK;
+
+ switch (bdp->params.e100_speed_duplex) {
+ case E100_SPEED_10_HALF:
+ control &= ~BMCR_SPEED100;
+ control &= ~BMCR_FULLDPLX;
+ bdp->cur_line_speed = 10;
+ bdp->cur_dplx_mode = HALF_DUPLEX;
+ break;
+
+ case E100_SPEED_10_FULL:
+ control &= ~BMCR_SPEED100;
+ control |= BMCR_FULLDPLX;
+ bdp->cur_line_speed = 10;
+ bdp->cur_dplx_mode = FULL_DUPLEX;
+ break;
+
+ case E100_SPEED_100_HALF:
+ control |= BMCR_SPEED100;
+ control &= ~BMCR_FULLDPLX;
+ bdp->cur_line_speed = 100;
+ bdp->cur_dplx_mode = HALF_DUPLEX;
+ break;
+
+ case E100_SPEED_100_FULL:
+ control |= BMCR_SPEED100;
+ control |= BMCR_FULLDPLX;
+ bdp->cur_line_speed = 100;
+ bdp->cur_dplx_mode = FULL_DUPLEX;
+ break;
+ }
+
+ e100_mdi_write(bdp, MII_BMCR, bdp->phy_addr, control);
+
+ /* loop must run at least once */
+ /* wait up to 2 seconds for the link state to settle, yielding
+  * the CPU between polls */
+ expires = jiffies + 2 * HZ;
+ do {
+ if (e100_update_link_state(bdp) ||
+     time_after(jiffies, expires)) {
+ break;
+ } else {
+ yield();
+ }
+
+ } while (true);
+}
+
+/* Write the user-configured speed/duplex straight into the PHY's BMCR
+ * without resetting the PHY or waiting for link. */
+void
+e100_force_speed_duplex_to_phy(struct e100_private *bdp)
+{
+	u16 bmcr;
+
+	/* Start from the current BMCR with autoneg and loopback off */
+	e100_mdi_read(bdp, MII_BMCR, bdp->phy_addr, &bmcr);
+	bmcr &= ~(BMCR_ANENABLE | BMCR_LOOPBACK);
+
+	/* Fold in the requested speed and duplex bits */
+	switch (bdp->params.e100_speed_duplex) {
+	case E100_SPEED_10_HALF:
+		bmcr &= ~(BMCR_SPEED100 | BMCR_FULLDPLX);
+		break;
+
+	case E100_SPEED_10_FULL:
+		bmcr &= ~BMCR_SPEED100;
+		bmcr |= BMCR_FULLDPLX;
+		break;
+
+	case E100_SPEED_100_HALF:
+		bmcr |= BMCR_SPEED100;
+		bmcr &= ~BMCR_FULLDPLX;
+		break;
+
+	case E100_SPEED_100_FULL:
+		bmcr |= (BMCR_SPEED100 | BMCR_FULLDPLX);
+		break;
+	}
+
+	/* Send speed/duplex command to PHY layer. */
+	e100_mdi_write(bdp, MII_BMCR, bdp->phy_addr, bmcr);
+}
+
+/*
+ * Procedure: e100_set_fc
+ *
+ * Description: Checks the link's capability for flow control and records
+ * the result in bdp->flags (DF_LINK_FC_CAP / DF_LINK_FC_TX_ONLY).
+ *
+ * Arguments: bdp - Pointer to the e100_private structure for the board
+ *
+ * Returns: void
+ *
+ */
+static void
+e100_set_fc(struct e100_private *bdp)
+{
+ u16 ad_reg;
+ u16 lp_ad_reg;
+ u16 exp_reg;
+
+ /* no flow control for 82557, forced links or half duplex */
+ /* NOTE(review): IS_BACHELOR presumably marks controllers newer than
+  * the 82557 (which lacks flow control) -- confirm in e100.h */
+ if (!netif_carrier_ok(bdp->device) || (bdp->flags & DF_SPEED_FORCED) ||
+     (bdp->cur_dplx_mode == HALF_DUPLEX) ||
+     !(bdp->flags & IS_BACHELOR)) {
+
+ bdp->flags &= ~DF_LINK_FC_CAP;
+ return;
+ }
+
+ /* See if link partner is capable of Auto-Negotiation (bit 0, reg 6) */
+ e100_mdi_read(bdp, MII_EXPANSION, bdp->phy_addr, &exp_reg);
+
+ if (exp_reg & EXPANSION_NWAY) {
+ /* Read our advertisement register */
+ e100_mdi_read(bdp, MII_ADVERTISE, bdp->phy_addr, &ad_reg);
+
+ /* Read our link partner's advertisement register */
+ e100_mdi_read(bdp, MII_LPA, bdp->phy_addr, &lp_ad_reg);
+
+ ad_reg &= lp_ad_reg; /* AND the 2 ad registers */
+
+ if (ad_reg & NWAY_AD_FC_SUPPORTED)
+ bdp->flags |= DF_LINK_FC_CAP;
+ else
+ /* If link partner is capable of autoneg, but */
+ /* not capable of flow control, Received PAUSE */
+ /* frames are still honored, i.e., */
+ /* transmitted frames would be paused */
+ /* by incoming PAUSE frames */
+ bdp->flags |= DF_LINK_FC_TX_ONLY;
+
+ } else {
+ bdp->flags &= ~DF_LINK_FC_CAP;
+ }
+}
+
+/*
+ * Procedure: e100_phy_check
+ *
+ * Description: Watchdog-time link check. Refreshes speed/duplex (which
+ *	also updates the carrier flag), reacts to link transitions, and
+ *	runs the squelch and zero-lock workaround state machines.
+ *
+ * Arguments: bdp - Pointer to the e100_private structure for the board
+ *
+ * Returns: true if link state was changed
+ *          false otherwise
+ */
+unsigned char
+e100_phy_check(struct e100_private *bdp)
+{
+	unsigned char had_link, has_link;
+	unsigned char changed = false;
+
+	had_link = netif_carrier_ok(bdp->device) ? 1 : 0;
+
+	/* Updates carrier state as a side effect */
+	e100_find_speed_duplex(bdp);
+	has_link = netif_carrier_ok(bdp->device) ? 1 : 0;
+
+	if (!had_link && has_link) {
+		/* Link came up: re-evaluate flow-control capability */
+		e100_set_fc(bdp);
+		changed = true;
+	} else if (had_link && !has_link) {
+		/* Link went down: restart the zero-lock state machine */
+		bdp->zlock_state = ZLOCK_INITIAL;
+
+		/* set auto lock for phy auto-negotiation on link up */
+		if ((bdp->PhyId & PHY_MODEL_REV_ID_MASK) == PHY_82555_TX)
+			e100_mdi_write(bdp, PHY_82555_MDI_EQUALIZER_CSR,
+				       bdp->phy_addr, 0);
+		changed = true;
+	}
+
+	e100_phy_fix_squelch(bdp);
+	e100_handle_zlock(bdp);
+
+	return changed;
+}
+
+/*
+ * Procedure: e100_auto_neg
+ *
+ * Description: This routine will start autonegotiation and wait
+ *              (yielding, up to ~3 seconds) for it to complete,
+ *              then record the resulting speed/duplex.
+ *
+ * Arguments:
+ * bdp - pointer to this card's e100_bdconfig structure
+ * force_restart - defines if autoneg should be restarted even if it
+ * has been completed before
+ * Returns:
+ * NOTHING
+ */
+static void
+e100_auto_neg(struct e100_private *bdp, unsigned char force_restart)
+{
+ u16 stat_reg;
+ unsigned long expires;
+
+ bdp->flags &= ~DF_SPEED_FORCED;
+
+ /* BMSR bits are sticky: read twice to get the current status */
+ e100_mdi_read(bdp, MII_BMSR, bdp->phy_addr, &stat_reg);
+ e100_mdi_read(bdp, MII_BMSR, bdp->phy_addr, &stat_reg);
+
+ /* if we are capable of performing autoneg then we restart if needed */
+ if ((stat_reg != 0xFFFF) && (stat_reg & BMSR_ANEGCAPABLE)) {
+
+ if ((!force_restart) &&
+     (stat_reg & BMSR_ANEGCOMPLETE)) {
+ goto exit;
+ }
+
+ e100_mdi_write(bdp, MII_BMCR, bdp->phy_addr,
+        BMCR_ANENABLE | BMCR_ANRESTART);
+
+ /* wait for autoneg to complete (up to 3 seconds) */
+ expires = jiffies + HZ * 3;
+ do {
+ /* now re-read the value. Sticky so read twice */
+ e100_mdi_read(bdp, MII_BMSR, bdp->phy_addr, &stat_reg);
+ e100_mdi_read(bdp, MII_BMSR, bdp->phy_addr, &stat_reg);
+
+ if ((stat_reg & BMSR_ANEGCOMPLETE) ||
+     time_after(jiffies, expires) ) {
+ goto exit;
+ } else {
+ yield();
+ }
+ } while (true);
+ }
+
+exit:
+ e100_find_speed_duplex(bdp);
+}
+
+/* Apply the configured speed/duplex policy: autonegotiate or force,
+ * configuring MDI/MDI-X auto switching on D102+ silicon accordingly,
+ * then re-evaluate flow control. */
+void
+e100_phy_set_speed_duplex(struct e100_private *bdp, unsigned char force_restart)
+{
+	unsigned char autoneg =
+		(bdp->params.e100_speed_duplex == E100_AUTONEG);
+
+	/* D102 and later: auto switching on for autoneg, reset otherwise */
+	if (bdp->rev_id >= D102_REV_ID)
+		e100_mdi_write(bdp, MII_NCONFIG, bdp->phy_addr,
+			       autoneg ? MDI_MDIX_AUTO_SWITCH_ENABLE
+				       : MDI_MDIX_RESET_ALL_MASK);
+
+	if (autoneg)
+		e100_auto_neg(bdp, force_restart);
+	else
+		e100_force_speed_duplex(bdp);
+
+	e100_set_fc(bdp);
+}
+
+/* Reset the PHY and kick off a fresh auto-negotiation cycle. */
+void
+e100_phy_autoneg(struct e100_private *bdp)
+{
+	e100_mdi_write(bdp, MII_BMCR, bdp->phy_addr,
+		       BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
+
+	/* brief settle time after issuing the command */
+	udelay(100);
+}
+
+/* Put the PHY into loopback mode (writes BMCR with only the loopback
+ * bit set, clearing everything else). */
+void
+e100_phy_set_loopback(struct e100_private *bdp)
+{
+	e100_mdi_write(bdp, MII_BMCR, bdp->phy_addr, BMCR_LOOPBACK);
+
+	/* brief settle time after issuing the command */
+	udelay(100);
+}
+
+/* Issue a PHY reset through the MII control register. */
+void
+e100_phy_reset(struct e100_private *bdp)
+{
+	e100_mdi_write(bdp, MII_BMCR, bdp->phy_addr, BMCR_RESET);
+}
+
+/*
+ * Procedure: e100_phy_init
+ *
+ * Description: One-time PHY bring-up: locate the PHY, make it the only
+ * de-isolated one, identify it, apply component-specific fixups,
+ * reset the driver-side PHY state machines, then program the
+ * configured speed/duplex and the 82555 polarity workaround.
+ *
+ * Returns: false if e100_phy_specific_setup() vetoed an unsupported
+ * forced speed/duplex, true otherwise.
+ */
+unsigned char __devinit
+e100_phy_init(struct e100_private *bdp)
+{
+ e100_phy_address_detect(bdp);
+ e100_phy_isolate(bdp);
+ e100_phy_id_detect(bdp);
+
+ /* may fail for an unsupported forced setting (e.g. 503 @ 100Mbps) */
+ if (!e100_phy_specific_setup(bdp))
+ return false;
+
+ /* reset squelch and zero-lock workaround state machines */
+ bdp->PhyState = 0;
+ bdp->PhyDelay = 0;
+ bdp->zlock_state = ZLOCK_INITIAL;
+
+ e100_phy_set_speed_duplex(bdp, false);
+ e100_fix_polarity(bdp);
+
+ return true;
+}
+
+/*
+ * Procedure: e100_get_link_state
+ *
+ * Description: Report whether the adapter currently has PHY link.
+ *	82559 (D101MA) and later controllers mirror link status in the
+ *	SCB general status byte, which saves an MDI read; older parts
+ *	read the MII status register (twice, since it is sticky).
+ *
+ * Arguments: bdp - Pointer to the e100_private structure for the board
+ *
+ * Returns: true - If a link is found
+ *          false - If there is no link
+ */
+unsigned char
+e100_get_link_state(struct e100_private *bdp)
+{
+	u16 bmsr;
+
+	/* Newer silicon: link bit lives in the SCB general status byte */
+	if (bdp->rev_id >= D101MA_REV_ID)
+		return (readb(&bdp->scb->scb_ext.d101m_scb.scb_gen_stat)
+			& BIT_0) ? true : false;
+
+	/* Older silicon: double-read the sticky MII status register */
+	e100_mdi_read(bdp, MII_BMSR, bdp->phy_addr, &bmsr);
+	e100_mdi_read(bdp, MII_BMSR, bdp->phy_addr, &bmsr);
+
+	return (bmsr & BMSR_LSTATUS) ? true : false;
+}
+
+/*
+ * Procedure: e100_update_link_state
+ *
+ * Description: Recompute the effective link state (PHY link AND
+ *	netif_running) and synchronize the netdev carrier flag with it.
+ *
+ * Arguments: bdp - Pointer to the e100_private structure for the board
+ *
+ * Returns: true - If a link is found
+ *          false - If there is no link
+ */
+unsigned char
+e100_update_link_state(struct e100_private *bdp)
+{
+	unsigned char link;
+
+	/* Logical AND of PHY link and interface-up state */
+	link = e100_get_link_state(bdp) && netif_running(bdp->device);
+
+	/* Only toggle the carrier flag on an actual transition */
+	if (link && !netif_carrier_ok(bdp->device))
+		netif_carrier_on(bdp->device);
+	else if (!link && netif_carrier_ok(bdp->device))
+		netif_carrier_off(bdp->device);
+
+	return link;
+}
+
+/**************************************************************************\
+ **
+ ** PROC NAME: e100_handle_zlock
+ ** This function manages a state machine that controls
+ ** the driver's zero locking algorithm.
+ ** This function is called by e100_watchdog() every ~2 second.
+ ** States:
+ ** The current link handling state is stored in
+ ** bdp->zlock_state, and is one of:
+ ** ZLOCK_INITIAL, ZLOCK_READING, ZLOCK_SLEEPING
+ ** Detailed description of the states and the transitions
+ ** between states is found below.
+ ** Note that any time the link is down / there is a reset
+ ** state will be changed outside this function to ZLOCK_INITIAL
+ ** Algorithm:
+ ** 1. If link is up & 100 Mbps continue else stay in #1:
+ ** 2. Set 'auto lock'
+ ** 3. Read & Store 100 times 'Zero' locked in 1 sec interval
+ ** 4. If max zero read >= 0xB continue else goto 1
+ ** 5. Set most popular 'Zero' read in #3
+ ** 6. Sleep 5 minutes
+ ** 7. Read number of errors, if it is > 300 goto 2 else goto 6
+ ** Data Structures (in DRIVER_DATA):
+ ** zlock_state - current state of the algorithm
+ ** zlock_read_cnt - counts number of reads (up to 100)
+ ** zlock_read_data[i] - counts number of times 'Zero' read was i, 0 <= i <= 15
+ ** zlock_sleep_cnt - keeps track of "sleep" time (up to 300 secs = 5 minutes)
+ **
+ ** Parameters: DRIVER_DATA *bdp
+ **
+ ** bdp - Pointer to HSM's adapter data space
+ **
+ ** Return Value: NONE
+ **
+ ** See Also: e100_watchdog()
+ **
+ \**************************************************************************/
+/* Zero-lock workaround state machine -- see the block comment above for
+ * the full algorithm. Called once per watchdog tick (~2 s). */
+void
+e100_handle_zlock(struct e100_private *bdp)
+{
+ u16 pos;
+ u16 eq_reg;
+ u16 err_cnt;
+ u8 mpz; /* Most Popular Zero */
+
+ switch (bdp->zlock_state) {
+ case ZLOCK_INITIAL:
+
+ /* only run on pre-D102 silicon with 100 Mbps link up */
+ if (((u8) bdp->rev_id <= D102_REV_ID) ||
+     !(bdp->cur_line_speed == 100) ||
+     !netif_carrier_ok(bdp->device)) {
+ break;
+ }
+
+ /* initialize hw and sw and start reading */
+ e100_mdi_write(bdp, PHY_82555_MDI_EQUALIZER_CSR,
+        bdp->phy_addr, 0);
+ /* reset read counters: */
+ bdp->zlock_read_cnt = 0;
+ for (pos = 0; pos < 16; pos++)
+ bdp->zlock_read_data[pos] = 0;
+ /* start reading in the next call back: */
+ bdp->zlock_state = ZLOCK_READING;
+
+ /* FALL THROUGH !! */
+
+ case ZLOCK_READING:
+ /* state: reading (100 times) zero locked in 1 sec interval
+  * prev states: ZLOCK_INITIAL
+  * next states: ZLOCK_INITIAL, ZLOCK_SLEEPING */
+
+ /* tally the 4-bit 'Zero' value from the equalizer register */
+ e100_mdi_read(bdp, PHY_82555_MDI_EQUALIZER_CSR,
+       bdp->phy_addr, &eq_reg);
+ pos = (eq_reg & ZLOCK_ZERO_MASK) >> 4;
+ bdp->zlock_read_data[pos]++;
+ bdp->zlock_read_cnt++;
+
+ if (bdp->zlock_read_cnt == ZLOCK_MAX_READS) {
+ /* check if we read a 'Zero' value of 0xB or greater */
+ if ((bdp->zlock_read_data[0xB]) ||
+     (bdp->zlock_read_data[0xC]) ||
+     (bdp->zlock_read_data[0xD]) ||
+     (bdp->zlock_read_data[0xE]) ||
+     (bdp->zlock_read_data[0xF])) {
+
+ /* we've read 'Zero' value of 0xB or greater,
+  * find most popular 'Zero' value and lock it */
+ mpz = 0;
+ /* this loop finds the most popular 'Zero': */
+ for (pos = 1; pos < 16; pos++) {
+ if (bdp->zlock_read_data[pos] >
+     bdp->zlock_read_data[mpz])
+
+ mpz = pos;
+ }
+ /* now lock the most popular 'Zero': */
+ eq_reg = (ZLOCK_SET_ZERO | mpz);
+ e100_mdi_write(bdp,
+        PHY_82555_MDI_EQUALIZER_CSR,
+        bdp->phy_addr, eq_reg);
+
+ /* sleep for 5 minutes: */
+ bdp->zlock_sleep_cnt = jiffies;
+ bdp->zlock_state = ZLOCK_SLEEPING;
+ /* we will be reading the # of errors after 5
+  * minutes, so we need to reset the error
+  * counters - these registers are self clearing
+  * on read, so read them */
+ e100_mdi_read(bdp, PHY_82555_SYMBOL_ERR,
+       bdp->phy_addr, &err_cnt);
+
+ } else {
+ /* we did not read a 'Zero' value of 0xB or
+  * above. go back to the start */
+ bdp->zlock_state = ZLOCK_INITIAL;
+ }
+
+ }
+ break;
+
+ case ZLOCK_SLEEPING:
+ /* state: sleeping for 5 minutes
+  * prev states: ZLOCK_READING
+  * next states: ZLOCK_READING, ZLOCK_SLEEPING */
+
+ /* if 5 minutes have passed: */
+ if ((jiffies - bdp->zlock_sleep_cnt) >= ZLOCK_MAX_SLEEP) {
+ /* read and sum up the number of errors: */
+ e100_mdi_read(bdp, PHY_82555_SYMBOL_ERR,
+       bdp->phy_addr, &err_cnt);
+ /* if we've more than 300 errors (this number was
+  * calculated according to the spec max allowed errors
+  * (80 errors per 1 million frames) for 5 minutes in
+  * 100 Mbps (or the user specified max BER number) */
+ if (err_cnt > bdp->params.ber) {
+ /* start again in the next callback: */
+ bdp->zlock_state = ZLOCK_INITIAL;
+ } else {
+ /* we don't have more errors than allowed,
+  * sleep for 5 minutes */
+ bdp->zlock_sleep_cnt = jiffies;
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+}
--- /dev/null
+/*******************************************************************************
+
+
+ Copyright(c) 1999 - 2003 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc., 59
+ Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ The full GNU General Public License is included in this distribution in the
+ file called LICENSE.
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+*******************************************************************************/
+
+#ifndef _E100_PHY_INC_
+#define _E100_PHY_INC_
+
+#include "e100.h"
+
+/*
+ * Auto-polarity enable/disable
+ * e100_autopolarity = 0 => disable auto-polarity
+ * e100_autopolarity = 1 => enable auto-polarity
+ * e100_autopolarity = 2 => let software determine
+ */
+#define E100_AUTOPOLARITY 2
+
+#define IS_NC3133(bdp) (((bdp)->pdev->subsystem_vendor == 0x0E11) && \
+ ((bdp)->pdev->subsystem_device == 0xB0E1))
+
+#define PHY_503 0
+#define PHY_100_A 0x000003E0
+#define PHY_100_C 0x035002A8
+#define PHY_NSC_TX 0x5c002000
+#define PHY_82562ET 0x033002A8
+#define PHY_82562EM 0x032002A8
+#define PHY_82562EH 0x017002A8
+#define PHY_82555_TX 0x015002a8 /* added this for 82555 */
+#define PHY_OTHER 0xFFFF
+#define MAX_PHY_ADDR 31
+#define MIN_PHY_ADDR 0
+
+#define PHY_MODEL_REV_ID_MASK 0xFFF0FFFF
+
+#define PHY_DEFAULT_ADDRESS 1
+#define PHY_ADDRESS_503 32
+
+/* MDI Control register bit definitions */
+#define MDI_PHY_READY BIT_28 /* PHY is ready for next MDI cycle */
+
+#define MDI_NC3133_CONFIG_REG 0x19
+#define MDI_NC3133_100FX_ENABLE BIT_2
+#define MDI_NC3133_INT_ENABLE_REG 0x17
+#define MDI_NC3133_INT_ENABLE BIT_1
+
+/* MDI Control register opcode definitions */
+#define MDI_WRITE 1 /* Phy Write */
+#define MDI_READ 2 /* Phy read */
+
+/* MDI register set*/
+#define AUTO_NEG_NEXT_PAGE_REG 0x07 /* Auto-negotiation next page xmit */
+#define EXTENDED_REG_0 0x10 /* Extended reg 0 (Phy 100 modes) */
+#define EXTENDED_REG_1 0x14 /* Extended reg 1 (Phy 100 error indications) */
+#define NSC_CONG_CONTROL_REG 0x17 /* National (TX) congestion control */
+#define NSC_SPEED_IND_REG 0x19 /* National (TX) speed indication */
+
+#define HWI_CONTROL_REG 0x1D /* HWI Control register */
+/* MDI/MDI-X Control Register bit definitions */
+#define MDI_MDIX_RES_TIMER BIT_0_3 /* minimum slot time for resolution timer */
+#define MDI_MDIX_CONFIG_IS_OK BIT_4 /* 1 = resolution algorithm completes OK */
+#define MDI_MDIX_STATUS BIT_5 /* 1 = MDIX (cross over), 0 = MDI (straight through) */
+#define MDI_MDIX_SWITCH BIT_6 /* 1 = Forces to MDIX, 0 = Forces to MDI */
+#define MDI_MDIX_AUTO_SWITCH_ENABLE BIT_7 /* 1 = MDI/MDI-X feature enabled */
+#define MDI_MDIX_CONCT_CONFIG BIT_8 /* Sets the MDI/MDI-X connectivity configuration (test purpose only) */
+#define MDI_MDIX_CONCT_TEST_ENABLE BIT_9 /* 1 = Enables connectivity testing */
+#define MDI_MDIX_RESET_ALL_MASK 0x0000
+
+/* HWI Control Register bit definitions */
+#define HWI_TEST_DISTANCE BIT_0_8 /* distance to cable problem */
+#define HWI_TEST_HIGHZ_PROBLEM BIT_9 /* 1 = Open Circuit */
+#define HWI_TEST_LOWZ_PROBLEM BIT_10 /* 1 = Short Circuit */
+#define HWI_TEST_RESERVED (BIT_11 | BIT_12) /* reserved */
+#define HWI_TEST_EXECUTE BIT_13 /* 1 = Execute the HWI test on the PHY */
+#define HWI_TEST_ABILITY BIT_14 /* 1 = test passed */
+#define HWI_TEST_ENABLE BIT_15 /* 1 = Enables the HWI feature */
+#define HWI_RESET_ALL_MASK 0x0000
+
+/* ############Start of 82555 specific defines################## */
+
+/* Intel 82555 specific registers */
+#define PHY_82555_CSR 0x10 /* 82555 CSR */
+#define PHY_82555_SPECIAL_CONTROL 0x11 /* 82555 special control register */
+
+#define PHY_82555_RCV_ERR 0x15 /* 82555 100BaseTx Receive Error
+ * Frame Counter */
+#define PHY_82555_SYMBOL_ERR 0x16 /* 82555 RCV Symbol Error Counter */
+#define PHY_82555_PREM_EOF_ERR 0x17 /* 82555 100BaseTx RCV Premature End
+ * of Frame Error Counter */
+#define PHY_82555_EOF_COUNTER 0x18 /* 82555 end of frame error counter */
+#define PHY_82555_MDI_EQUALIZER_CSR 0x1a /* 82555 specific equalizer reg. */
+
+/* 82555 CSR bits */
+#define PHY_82555_SPEED_BIT BIT_1
+#define PHY_82555_POLARITY_BIT BIT_8
+
+/* 82555 equalizer reg. opcodes */
+#define ENABLE_ZERO_FORCING 0x2010 /* write to ASD conf. reg. 0 */
+#define DISABLE_ZERO_FORCING 0x2000 /* write to ASD conf. reg. 0 */
+
+/* 82555 special control reg. opcodes */
+#define DISABLE_AUTO_POLARITY 0x0010
+#define EXTENDED_SQUELCH_BIT BIT_2
+
+/* ############End of 82555 specific defines##################### */
+
+/* Auto-Negotiation advertisement register bit definitions*/
+#define NWAY_AD_FC_SUPPORTED 0x0400 /* Flow Control supported */
+
+/* Auto-Negotiation link partner ability register bit definitions*/
+#define NWAY_LP_ABILITY 0x07e0 /* technologies supported */
+
+/* PHY 100 Extended Register 0 bit definitions*/
+#define PHY_100_ER0_FDX_INDIC BIT_0 /* 1 = FDX, 0 = half duplex */
+#define PHY_100_ER0_SPEED_INDIC BIT_1 /* 1 = 100Mbps, 0= 10Mbps */
+
+/* National Semiconductor TX phy congestion control register bit definitions*/
+#define NSC_TX_CONG_TXREADY BIT_10 /* Makes TxReady an input */
+#define NSC_TX_CONG_ENABLE BIT_8 /* Enables congestion control */
+
+/* National Semiconductor TX phy speed indication register bit definitions*/
+#define NSC_TX_SPD_INDC_SPEED BIT_6 /* 0 = 100Mbps, 1=10Mbps */
+
+/************* function prototypes ************/
+extern unsigned char e100_phy_init(struct e100_private *bdp);
+extern unsigned char e100_update_link_state(struct e100_private *bdp);
+extern unsigned char e100_phy_check(struct e100_private *bdp);
+extern void e100_phy_set_speed_duplex(struct e100_private *bdp,
+ unsigned char force_restart);
+extern void e100_phy_autoneg(struct e100_private *bdp);
+extern void e100_phy_reset(struct e100_private *bdp);
+extern void e100_phy_set_loopback(struct e100_private *bdp);
+extern int e100_mdi_write(struct e100_private *, u32, u32, u16);
+extern int e100_mdi_read(struct e100_private *, u32, u32, u16 *);
+
+#endif
--- /dev/null
+/*******************************************************************************
+
+
+ Copyright(c) 1999 - 2003 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc., 59
+ Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ The full GNU General Public License is included in this distribution in the
+ file called LICENSE.
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+*******************************************************************************/
+
+#include "e100.h"
+#include "e100_config.h"
+
+extern u16 e100_eeprom_read(struct e100_private *, u16);
+extern int e100_wait_exec_cmplx(struct e100_private *, u32,u8, u8);
+extern void e100_phy_reset(struct e100_private *bdp);
+extern void e100_phy_autoneg(struct e100_private *bdp);
+extern void e100_phy_set_loopback(struct e100_private *bdp);
+extern void e100_force_speed_duplex(struct e100_private *bdp);
+
+static u8 e100_diag_selftest(struct net_device *);
+static u8 e100_diag_eeprom(struct net_device *);
+static u8 e100_diag_loopback(struct net_device *);
+
+static u8 e100_diag_one_loopback (struct net_device *, u8);
+static u8 e100_diag_rcv_loopback_pkt(struct e100_private *);
+static void e100_diag_config_loopback(struct e100_private *, u8, u8, u8 *,u8 *);
+static u8 e100_diag_loopback_alloc(struct e100_private *);
+static void e100_diag_loopback_cu_ru_exec(struct e100_private *);
+static u8 e100_diag_check_pkt(u8 *);
+static void e100_diag_loopback_free(struct e100_private *);
+
+#define LB_PACKET_SIZE 1500
+
+/**
+ * e100_run_diag - main test execution handler - checks mask of requests and calls the diag routines
+ * @dev: adapter's net device data struct
+ * @test_info: array with test request mask also used to store test results
+ *
+ * RETURNS: updated flags field of struct ethtool_test
+ */
+u32
+e100_run_diag(struct net_device *dev, u64 *test_info, u32 flags)
+{
+ struct e100_private* bdp = dev->priv;
+ u8 test_result = true;
+
+ e100_isolate_driver(bdp);
+
+ if (flags & ETH_TEST_FL_OFFLINE) {
+ u8 fail_mask;
+
+ fail_mask = e100_diag_selftest(dev);
+ if (fail_mask) {
+ test_result = false;
+ if (fail_mask & REGISTER_TEST_FAIL)
+ test_info [E100_REG_TEST_FAIL] = true;
+ if (fail_mask & ROM_TEST_FAIL)
+ test_info [E100_ROM_TEST_FAIL] = true;
+ if (fail_mask & SELF_TEST_FAIL)
+ test_info [E100_MAC_TEST_FAIL] = true;
+ if (fail_mask & TEST_TIMEOUT)
+ test_info [E100_CHIP_TIMEOUT] = true;
+ }
+
+ fail_mask = e100_diag_loopback(dev);
+ if (fail_mask) {
+ test_result = false;
+ if (fail_mask & PHY_LOOPBACK)
+ test_info [E100_LPBK_PHY_FAIL] = true;
+ if (fail_mask & MAC_LOOPBACK)
+ test_info [E100_LPBK_MAC_FAIL] = true;
+ }
+ }
+
+ if (!e100_diag_eeprom(dev)) {
+ test_result = false;
+ test_info [E100_EEPROM_TEST_FAIL] = true;
+ }
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ * 2);
+ e100_deisolate_driver(bdp, false);
+
+ return flags | (test_result ? 0 : ETH_TEST_FL_FAILED);
+}
+
+/**
+ * e100_diag_selftest - run hardware selftest
+ * @dev: adapter's net device data struct
+ */
+static u8
+e100_diag_selftest(struct net_device *dev)
+{
+ struct e100_private *bdp = dev->priv;
+ u32 st_timeout, st_result;
+ u8 retval = 0;
+
+ if (!e100_selftest(bdp, &st_timeout, &st_result)) {
+ if (!st_timeout) {
+ if (st_result & CB_SELFTEST_REGISTER_BIT)
+ retval |= REGISTER_TEST_FAIL;
+ if (st_result & CB_SELFTEST_DIAG_BIT)
+ retval |= SELF_TEST_FAIL;
+ if (st_result & CB_SELFTEST_ROM_BIT)
+ retval |= ROM_TEST_FAIL;
+ } else {
+ retval = TEST_TIMEOUT;
+ }
+ }
+
+ e100_configure_device(bdp);
+
+ return retval;
+}
+
+/**
+ * e100_diag_eeprom - validate eeprom checksum correctness
+ * @dev: adapter's net device data struct
+ *
+ */
+static u8
+e100_diag_eeprom (struct net_device *dev)
+{
+ struct e100_private *bdp = dev->priv;
+ u16 i, eeprom_sum, eeprom_actual_csm;
+
+ for (i = 0, eeprom_sum = 0; i < (bdp->eeprom_size - 1); i++) {
+ eeprom_sum += e100_eeprom_read(bdp, i);
+ }
+
+ eeprom_actual_csm = e100_eeprom_read(bdp, bdp->eeprom_size - 1);
+
+ if (eeprom_actual_csm == (u16)(EEPROM_SUM - eeprom_sum)) {
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * e100_diag_loopback - performs loopback test
+ * @dev: adapter's net device data struct
+ */
+static u8
+e100_diag_loopback (struct net_device *dev)
+{
+ u8 rc = 0;
+
+ printk(KERN_DEBUG "%s: PHY loopback test starts\n", dev->name);
+ e100_sw_reset(dev->priv, PORT_SELECTIVE_RESET);
+ if (!e100_diag_one_loopback(dev, PHY_LOOPBACK)) {
+ rc |= PHY_LOOPBACK;
+ }
+ printk(KERN_DEBUG "%s: PHY loopback test ends\n", dev->name);
+
+ printk(KERN_DEBUG "%s: MAC loopback test starts\n", dev->name);
+ e100_sw_reset(dev->priv, PORT_SELECTIVE_RESET);
+ if (!e100_diag_one_loopback(dev, MAC_LOOPBACK)) {
+ rc |= MAC_LOOPBACK;
+ }
+ printk(KERN_DEBUG "%s: MAC loopback test ends\n", dev->name);
+
+ return rc;
+}
+
+/**
+ * e100_diag_one_loopback - performs a single loopback test
+ * @dev: adapter's net device data struct
+ * @mode: loopback test type
+ */
+static u8
+e100_diag_one_loopback (struct net_device *dev, u8 mode)
+{
+ struct e100_private *bdp = dev->priv;
+ u8 res = false;
+ u8 saved_dynamic_tbd = false;
+ u8 saved_extended_tcb = false;
+
+ if (!e100_diag_loopback_alloc(bdp))
+ return false;
+
+ /* change the config block to standard tcb and the correct loopback */
+ e100_diag_config_loopback(bdp, true, mode,
+ &saved_extended_tcb, &saved_dynamic_tbd);
+
+ e100_diag_loopback_cu_ru_exec(bdp);
+
+ if (e100_diag_rcv_loopback_pkt(bdp)) {
+ res = true;
+ }
+
+ e100_diag_loopback_free(bdp);
+
+ /* change the config block to previous tcb mode and the no loopback */
+ e100_diag_config_loopback(bdp, false, mode,
+ &saved_extended_tcb, &saved_dynamic_tbd);
+ return res;
+}
+
+/**
+ * e100_diag_config_loopback - setup/clear loopback before/after lpbk test
+ * @bdp: adapter's private data struct
+ * @set_loopback: true if the function is called to set lb
+ * @loopback_mode: the loopback mode(MAC or PHY)
+ * @tcb_extended: true if need to set extended tcb mode after clean loopback
+ * @dynamic_tbd: true if needed to set dynamic tbd mode after clean loopback
+ *
+ */
+void
+e100_diag_config_loopback(struct e100_private* bdp,
+ u8 set_loopback,
+ u8 loopback_mode,
+ u8* tcb_extended,
+ u8* dynamic_tbd)
+{
+ /* if set_loopback == true - we want to clear tcb_extended/dynamic_tbd.
+ * the previous values are saved in the params tcb_extended/dynamic_tbd
+ * if set_loopback == false - we want to restore previous value.
+ */
+ if (set_loopback || (*tcb_extended))
+ *tcb_extended = e100_config_tcb_ext_enable(bdp,*tcb_extended);
+
+ if (set_loopback || (*dynamic_tbd))
+ *dynamic_tbd = e100_config_dynamic_tbd(bdp,*dynamic_tbd);
+
+ if (set_loopback) {
+ /* ICH PHY loopback is broken */
+ if (bdp->flags & IS_ICH && loopback_mode == PHY_LOOPBACK)
+ loopback_mode = MAC_LOOPBACK;
+ /* Configure loopback on MAC */
+ e100_config_loopback_mode(bdp,loopback_mode);
+ } else {
+ e100_config_loopback_mode(bdp,NO_LOOPBACK);
+ }
+
+ e100_config(bdp);
+
+ if (loopback_mode == PHY_LOOPBACK) {
+ if (set_loopback)
+ /* Set PHY loopback mode */
+ e100_phy_set_loopback(bdp);
+ else { /* Back to normal speed and duplex */
+ if (bdp->params.e100_speed_duplex == E100_AUTONEG)
+ /* Reset PHY and do autoneg */
+ e100_phy_autoneg(bdp);
+ else
+ /* Reset PHY and force speed and duplex */
+ e100_force_speed_duplex(bdp);
+ }
+ /* Wait for PHY state change */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ);
+ } else { /* For MAC loopback wait 500 msec to take effect */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ / 2);
+ }
+}
+
+/**
+ * e100_diag_loopback_alloc - alloc & initiate tcb and rfd for the loopback
+ * @bdp: adapter's private data struct
+ *
+ */
+static u8
+e100_diag_loopback_alloc(struct e100_private *bdp)
+{
+ dma_addr_t dma_handle;
+ tcb_t *tcb;
+ rfd_t *rfd;
+ tbd_t *tbd;
+
+ /* tcb, tbd and transmit buffer are allocated */
+ tcb = pci_alloc_consistent(bdp->pdev,
+ (sizeof (tcb_t) + sizeof (tbd_t) +
+ LB_PACKET_SIZE),
+ &dma_handle);
+ if (tcb == NULL)
+ return false;
+
+ memset(tcb, 0x00, sizeof (tcb_t) + sizeof (tbd_t) + LB_PACKET_SIZE);
+ tcb->tcb_phys = dma_handle;
+ tcb->tcb_hdr.cb_status = 0;
+ tcb->tcb_hdr.cb_cmd =
+ cpu_to_le16(CB_EL_BIT | CB_TRANSMIT | CB_TX_SF_BIT);
+ /* Next command is null */
+ tcb->tcb_hdr.cb_lnk_ptr = cpu_to_le32(0xffffffff);
+ tcb->tcb_cnt = 0;
+ tcb->tcb_thrshld = bdp->tx_thld;
+ tcb->tcb_tbd_num = 1;
+ /* Set up tcb tbd pointer */
+ tcb->tcb_tbd_ptr = cpu_to_le32(tcb->tcb_phys + sizeof (tcb_t));
+ tbd = (tbd_t *) ((u8 *) tcb + sizeof (tcb_t));
+ /* Set up tbd transmit buffer */
+ tbd->tbd_buf_addr =
+ cpu_to_le32(le32_to_cpu(tcb->tcb_tbd_ptr) + sizeof (tbd_t));
+ tbd->tbd_buf_cnt = __constant_cpu_to_le16(1024);
+ /* The value of first 512 bytes is FF */
+ memset((void *) ((u8 *) tbd + sizeof (tbd_t)), 0xFF, 512);
+ /* The value of second 512 bytes is BA */
+ memset((void *) ((u8 *) tbd + sizeof (tbd_t) + 512), 0xBA, 512);
+ wmb();
+ rfd = pci_alloc_consistent(bdp->pdev, sizeof (rfd_t), &dma_handle);
+
+ if (rfd == NULL) {
+ pci_free_consistent(bdp->pdev,
+ sizeof (tcb_t) + sizeof (tbd_t) +
+ LB_PACKET_SIZE, tcb, tcb->tcb_phys);
+ return false;
+ }
+
+ memset(rfd, 0x00, sizeof (rfd_t));
+
+ /* init all fields in rfd */
+ rfd->rfd_header.cb_cmd = cpu_to_le16(RFD_EL_BIT);
+ rfd->rfd_sz = cpu_to_le16(ETH_FRAME_LEN + CHKSUM_SIZE);
+ /* dma_handle is physical address of rfd */
+ bdp->loopback.dma_handle = dma_handle;
+ bdp->loopback.tcb = tcb;
+ bdp->loopback.rfd = rfd;
+ wmb();
+ return true;
+}
+
+/**
+ * e100_diag_loopback_cu_ru_exec - activates cu and ru to send & receive the pkt
+ * @bdp: adapter's private data struct
+ *
+ */
+static void
+e100_diag_loopback_cu_ru_exec(struct e100_private *bdp)
+{
+ /*load CU & RU base */
+ if (!e100_wait_exec_cmplx(bdp, 0, SCB_CUC_LOAD_BASE, 0))
+ printk(KERN_ERR "e100: SCB_CUC_LOAD_BASE failed\n");
+ if(!e100_wait_exec_cmplx(bdp, 0, SCB_RUC_LOAD_BASE, 0))
+ printk(KERN_ERR "e100: SCB_RUC_LOAD_BASE failed!\n");
+ if(!e100_wait_exec_cmplx(bdp, bdp->loopback.dma_handle, SCB_RUC_START, 0))
+ printk(KERN_ERR "e100: SCB_RUC_START failed!\n");
+
+ bdp->next_cu_cmd = START_WAIT;
+ e100_start_cu(bdp, bdp->loopback.tcb);
+ bdp->last_tcb = NULL;
+ rmb();
+}
+/**
+ * e100_diag_check_pkt - checks if a given packet is a loopback packet
+ * @datap: pointer to the received packet's data
+ *
+ * Returns true if OK false otherwise.
+ */
+static u8
+e100_diag_check_pkt(u8 *datap)
+{
+ int i;
+ for (i = 0; i<512; i++) {
+ if( !((*datap)==0xFF && (*(datap + 512) == 0xBA)) ) {
+ printk (KERN_ERR "e100: check loopback packet failed at: %x\n", i);
+ return false;
+ }
+ }
+ printk (KERN_DEBUG "e100: Check received loopback packet OK\n");
+ return true;
+}
+
+/**
+ * e100_diag_rcv_loopback_pkt - waits for receive and checks lpbk packet
+ * @bdp: adapter's private data struct
+ *
+ * Returns true if OK false otherwise.
+ */
+static u8
+e100_diag_rcv_loopback_pkt(struct e100_private* bdp)
+{
+ rfd_t *rfdp;
+ u16 rfd_status;
+ unsigned long expires = jiffies + HZ * 2;
+
+ rfdp =bdp->loopback.rfd;
+
+ rfd_status = le16_to_cpu(rfdp->rfd_header.cb_status);
+
+ while (!(rfd_status & RFD_STATUS_COMPLETE)) {
+ if (time_before(jiffies, expires)) {
+ yield();
+ rmb();
+ rfd_status = le16_to_cpu(rfdp->rfd_header.cb_status);
+ } else {
+ break;
+ }
+ }
+
+ if (rfd_status & RFD_STATUS_COMPLETE) {
+ printk(KERN_DEBUG "e100: Loopback packet received\n");
+ return e100_diag_check_pkt(((u8 *)rfdp+bdp->rfd_size));
+ }
+ else {
+ printk(KERN_ERR "e100: Loopback packet not received\n");
+ return false;
+ }
+}
+
+/**
+ * e100_diag_loopback_free - free data allocated for loopback pkt send/receive
+ * @bdp: adapter's private data struct
+ *
+ */
+static void
+e100_diag_loopback_free (struct e100_private *bdp)
+{
+ pci_free_consistent(bdp->pdev,
+ sizeof(tcb_t) + sizeof(tbd_t) + LB_PACKET_SIZE,
+ bdp->loopback.tcb, bdp->loopback.tcb->tcb_phys);
+
+ pci_free_consistent(bdp->pdev, sizeof(rfd_t), bdp->loopback.rfd,
+ bdp->loopback.dma_handle);
+}
+
--- /dev/null
+/*******************************************************************************
+
+
+ Copyright(c) 1999 - 2003 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc., 59
+ Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ The full GNU General Public License is included in this distribution in the
+ file called LICENSE.
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+*******************************************************************************/
+
+#ifndef _E100_UCODE_H_
+#define _E100_UCODE_H_
+
+/*
+e100_ucode.h
+
+This file contains the loadable micro code arrays to implement receive
+bundling on the D101 A-step, D101 B-step, D101M (B-step only), D101S,
+D102 B-step, D102 B-step with TCO work around and D102 C-step.
+
+Each controller has its own specific micro code array. The array for one
+controller is totally incompatible with any other controller, and if used
+will most likely cause the controller to lock up and stop responding to
+the driver. Each micro code array has its own parameter offsets (described
+below), and they each have their own version number.
+*/
+
+/*************************************************************************
+* CPUSaver parameters
+*
+* All CPUSaver parameters are 16-bit literals that are part of a
+* "move immediate value" instruction. By changing the value of
+* the literal in the instruction before the code is loaded, the
+* driver can change algorithm.
+*
+* CPUSAVER_DWORD - This is the location of the instruction that loads
+* the dead-man timer with its initial value. By writing a 16-bit
+* value to the low word of this instruction, the driver can change
+* the timer value. The current default is either x600 or x800;
+* experiments show that the value probably should stay within the
+* range of x200 - x1000.
+*
+* CPUSAVER_BUNDLE_MAX_DWORD - This is the location of the instruction
+* that sets the maximum number of frames that will be bundled. In
+* some situations, such as the TCP windowing algorithm, it may be
+* better to limit the growth of the bundle size than let it go as
+* high as it can, because that could cause too much added latency.
+* The default is six, because this is the number of packets in the
+* default TCP window size. A value of 1 would make CPUSaver indicate
+* an interrupt for every frame received. If you do not want to put
+* a limit on the bundle size, set this value to xFFFF.
+*
+* CPUSAVER_MIN_SIZE_DWORD - This is the location of the instruction
+* that contains a bit-mask describing the minimum size frame that
+* will be bundled. The default masks the lower 7 bits, which means
+* that any frame less than 128 bytes in length will not be bundled,
+* but will instead immediately generate an interrupt. This does
+* not affect the current bundle in any way. Any frame that is 128
+* bytes or larger will be bundled normally. This feature is meant
+* to provide immediate indication of ACK frames in a TCP environment.
+* Customers were seeing poor performance when a machine with CPUSaver
+* enabled was sending but not receiving. The delay introduced when
+* the ACKs were received was enough to reduce total throughput, because
+* the sender would sit idle until the ACK was finally seen.
+*
+* The current default is 0xFF80, which masks out the lower 7 bits.
+* This means that any frame which is x7F (127) bytes or smaller
+* will cause an immediate interrupt. Because this value must be a
+* bit mask, there are only a few valid values that can be used. To
+* turn this feature off, the driver can write the value xFFFF to the
+* lower word of this instruction (in the same way that the other
+* parameters are used). Likewise, a value of 0xF800 (2047) would
+* cause an interrupt to be generated for every frame, because all
+* standard Ethernet frames are <= 2047 bytes in length.
+*************************************************************************/
+
+#ifndef UCODE_MAX_DWORDS
+#define UCODE_MAX_DWORDS 134
+#endif
+
+/********************************************************/
+/* CPUSaver micro code for the D101A */
+/********************************************************/
+
+/* Version 2.0 */
+
+/* This value is the same for both A and B step of 558. */
+
+#define D101_CPUSAVER_TIMER_DWORD 72
+#define D101_CPUSAVER_BUNDLE_DWORD UCODE_MAX_DWORDS
+#define D101_CPUSAVER_MIN_SIZE_DWORD UCODE_MAX_DWORDS
+
+#define D101_A_RCVBUNDLE_UCODE \
+{\
+0x03B301BB, 0x0046FFFF, 0xFFFFFFFF, 0x051DFFFF, 0xFFFFFFFF, 0xFFFFFFFF, \
+0x000C0001, 0x00101212, 0x000C0008, 0x003801BC, \
+0x00000000, 0x00124818, 0x000C1000, 0x00220809, \
+0x00010200, 0x00124818, 0x000CFFFC, 0x003803B5, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x0010009C, 0x0024B81D, 0x00130836, 0x000C0001, \
+0x0026081C, 0x0020C81B, 0x00130824, 0x00222819, \
+0x00101213, 0x00041000, 0x003A03B3, 0x00010200, \
+0x00101B13, 0x00238081, 0x00213049, 0x0038003B, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x0010009C, 0x0024B83E, 0x00130826, 0x000C0001, \
+0x0026083B, 0x00010200, 0x00134824, 0x000C0001, \
+0x00101213, 0x00041000, 0x0038051E, 0x00101313, \
+0x00010400, 0x00380521, 0x00050600, 0x00100824, \
+0x00101310, 0x00041000, 0x00080600, 0x00101B10, \
+0x0038051E, 0x00000000, 0x00000000, 0x00000000 \
+}
+
+/********************************************************/
+/* CPUSaver micro code for the D101B */
+/********************************************************/
+
+/* Version 2.0 */
+
+#define D101_B0_RCVBUNDLE_UCODE \
+{\
+0x03B401BC, 0x0047FFFF, 0xFFFFFFFF, 0x051EFFFF, 0xFFFFFFFF, 0xFFFFFFFF, \
+0x000C0001, 0x00101B92, 0x000C0008, 0x003801BD, \
+0x00000000, 0x00124818, 0x000C1000, 0x00220809, \
+0x00010200, 0x00124818, 0x000CFFFC, 0x003803B6, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x0010009C, 0x0024B81D, 0x0013082F, 0x000C0001, \
+0x0026081C, 0x0020C81B, 0x00130837, 0x00222819, \
+0x00101B93, 0x00041000, 0x003A03B4, 0x00010200, \
+0x00101793, 0x00238082, 0x0021304A, 0x0038003C, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x0010009C, 0x0024B83E, 0x00130826, 0x000C0001, \
+0x0026083B, 0x00010200, 0x00134837, 0x000C0001, \
+0x00101B93, 0x00041000, 0x0038051F, 0x00101313, \
+0x00010400, 0x00380522, 0x00050600, 0x00100837, \
+0x00101310, 0x00041000, 0x00080600, 0x00101790, \
+0x0038051F, 0x00000000, 0x00000000, 0x00000000 \
+}
+
+/********************************************************/
+/* CPUSaver micro code for the D101M (B-step only) */
+/********************************************************/
+
+/* Version 2.10.1 */
+
+/* Parameter values for the D101M B-step */
+#define D101M_CPUSAVER_TIMER_DWORD 78
+#define D101M_CPUSAVER_BUNDLE_DWORD 65
+#define D101M_CPUSAVER_MIN_SIZE_DWORD 126
+
+#define D101M_B_RCVBUNDLE_UCODE \
+{\
+0x00550215, 0xFFFF0437, 0xFFFFFFFF, 0x06A70789, 0xFFFFFFFF, 0x0558FFFF, \
+0x000C0001, 0x00101312, 0x000C0008, 0x00380216, \
+0x0010009C, 0x00204056, 0x002380CC, 0x00380056, \
+0x0010009C, 0x00244C0B, 0x00000800, 0x00124818, \
+0x00380438, 0x00000000, 0x00140000, 0x00380555, \
+0x00308000, 0x00100662, 0x00100561, 0x000E0408, \
+0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
+0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
+0x000C007E, 0x00222C21, 0x000C0002, 0x00103093, \
+0x00380C7A, 0x00080000, 0x00103090, 0x00380C7A, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x0010009C, 0x00244C2D, 0x00010004, 0x00041000, \
+0x003A0437, 0x00044010, 0x0038078A, 0x00000000, \
+0x00100099, 0x00206C7A, 0x0010009C, 0x00244C48, \
+0x00130824, 0x000C0001, 0x00101213, 0x00260C75, \
+0x00041000, 0x00010004, 0x00130826, 0x000C0006, \
+0x002206A8, 0x0013C926, 0x00101313, 0x003806A8, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
+0x00101210, 0x00380C34, 0x00000000, 0x00000000, \
+0x0021155B, 0x00100099, 0x00206559, 0x0010009C, \
+0x00244559, 0x00130836, 0x000C0000, 0x00220C62, \
+0x000C0001, 0x00101B13, 0x00229C0E, 0x00210C0E, \
+0x00226C0E, 0x00216C0E, 0x0022FC0E, 0x00215C0E, \
+0x00214C0E, 0x00380555, 0x00010004, 0x00041000, \
+0x00278C67, 0x00040800, 0x00018100, 0x003A0437, \
+0x00130826, 0x000C0001, 0x00220559, 0x00101313, \
+0x00380559, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00130831, 0x0010090B, 0x00124813, \
+0x000CFF80, 0x002606AB, 0x00041000, 0x00010004, \
+0x003806A8, 0x00000000, 0x00000000, 0x00000000, \
+}
+
+/********************************************************/
+/* CPUSaver micro code for the D101S */
+/********************************************************/
+
+/* Version 1.20.1 */
+
+/* Parameter values for the D101S */
+#define D101S_CPUSAVER_TIMER_DWORD 78
+#define D101S_CPUSAVER_BUNDLE_DWORD 67
+#define D101S_CPUSAVER_MIN_SIZE_DWORD 128
+
+#define D101S_RCVBUNDLE_UCODE \
+{\
+0x00550242, 0xFFFF047E, 0xFFFFFFFF, 0x06FF0818, 0xFFFFFFFF, 0x05A6FFFF, \
+0x000C0001, 0x00101312, 0x000C0008, 0x00380243, \
+0x0010009C, 0x00204056, 0x002380D0, 0x00380056, \
+0x0010009C, 0x00244F8B, 0x00000800, 0x00124818, \
+0x0038047F, 0x00000000, 0x00140000, 0x003805A3, \
+0x00308000, 0x00100610, 0x00100561, 0x000E0408, \
+0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
+0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
+0x000C007E, 0x00222FA1, 0x000C0002, 0x00103093, \
+0x00380F90, 0x00080000, 0x00103090, 0x00380F90, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x0010009C, 0x00244FAD, 0x00010004, 0x00041000, \
+0x003A047E, 0x00044010, 0x00380819, 0x00000000, \
+0x00100099, 0x00206FFD, 0x0010009A, 0x0020AFFD, \
+0x0010009C, 0x00244FC8, 0x00130824, 0x000C0001, \
+0x00101213, 0x00260FF7, 0x00041000, 0x00010004, \
+0x00130826, 0x000C0006, 0x00220700, 0x0013C926, \
+0x00101313, 0x00380700, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
+0x00101210, 0x00380FB6, 0x00000000, 0x00000000, \
+0x002115A9, 0x00100099, 0x002065A7, 0x0010009A, \
+0x0020A5A7, 0x0010009C, 0x002445A7, 0x00130836, \
+0x000C0000, 0x00220FE4, 0x000C0001, 0x00101B13, \
+0x00229F8E, 0x00210F8E, 0x00226F8E, 0x00216F8E, \
+0x0022FF8E, 0x00215F8E, 0x00214F8E, 0x003805A3, \
+0x00010004, 0x00041000, 0x00278FE9, 0x00040800, \
+0x00018100, 0x003A047E, 0x00130826, 0x000C0001, \
+0x002205A7, 0x00101313, 0x003805A7, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00130831, \
+0x0010090B, 0x00124813, 0x000CFF80, 0x00260703, \
+0x00041000, 0x00010004, 0x00380700 \
+}
+
+/********************************************************/
+/* CPUSaver micro code for the D102 B-step */
+/********************************************************/
+
+/* Version 2.0 */
+/* Parameter values for the D102 B-step */
+#define D102_B_CPUSAVER_TIMER_DWORD 82
+#define D102_B_CPUSAVER_BUNDLE_DWORD 106
+#define D102_B_CPUSAVER_MIN_SIZE_DWORD 70
+
+#define D102_B_RCVBUNDLE_UCODE \
+{\
+0x006F0276, 0x0EF71FFF, 0x0ED30F86, 0x0D250ED9, 0x1FFF1FFF, 0x1FFF04D2, \
+0x00300001, 0x0140D871, 0x00300008, 0x00E00277, \
+0x01406C57, 0x00816073, 0x008700FA, 0x00E00070, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x01406CBA, 0x00807F9A, 0x00901F9A, 0x0024FFFF, \
+0x014B6F6F, 0x0030FFFE, 0x01407172, 0x01496FBA, \
+0x014B6F72, 0x00308000, 0x01406C52, 0x00912EFC, \
+0x00E00EF8, 0x00000000, 0x00000000, 0x00000000, \
+0x00906F8C, 0x00900F8C, 0x00E00F87, 0x00000000, \
+0x00906ED8, 0x01406C55, 0x00E00ED4, 0x00000000, \
+0x01406C51, 0x0080DFC2, 0x01406C52, 0x00815FC2, \
+0x01406C57, 0x00917FCC, 0x00E01FDD, 0x00000000, \
+0x00822D30, 0x01406C51, 0x0080CD26, 0x01406C52, \
+0x00814D26, 0x01406C57, 0x00916D26, 0x014C6FD7, \
+0x00300000, 0x00841FD2, 0x00300001, 0x0140D772, \
+0x00E012B3, 0x014C6F91, 0x0150710B, 0x01496F72, \
+0x0030FF80, 0x00940EDD, 0x00102000, 0x00038400, \
+0x00E00EDA, 0x00000000, 0x00000000, 0x00000000, \
+0x01406C57, 0x00917FE9, 0x00001000, 0x00E01FE9, \
+0x00200600, 0x0140D76F, 0x00138400, 0x01406FD8, \
+0x0140D96F, 0x00E01FDD, 0x00038400, 0x00102000, \
+0x00971FD7, 0x00101000, 0x00050200, 0x00E804D2, \
+0x014C6FD8, 0x00300001, 0x00840D26, 0x0140D872, \
+0x00E00D26, 0x014C6FD9, 0x00300001, 0x0140D972, \
+0x00941FBD, 0x00102000, 0x00038400, 0x014C6FD8, \
+0x00300006, 0x00840EDA, 0x014F71D8, 0x0140D872, \
+0x00E00EDA, 0x01496F50, 0x00E004D3, 0x00000000, \
+}
+
+/********************************************************/
+/* Micro code for the D102 C-step */
+/********************************************************/
+
+/* Parameter values for the D102 C-step */
+#define D102_C_CPUSAVER_TIMER_DWORD 46
+#define D102_C_CPUSAVER_BUNDLE_DWORD 74
+#define D102_C_CPUSAVER_MIN_SIZE_DWORD 54
+
+#define D102_C_RCVBUNDLE_UCODE \
+{ \
+0x00700279, 0x0E6604E2, 0x02BF0CAE, 0x1508150C, 0x15190E5B, 0x0E840F13, \
+0x00E014D8, 0x00000000, 0x00000000, 0x00000000, \
+0x00E014DC, 0x00000000, 0x00000000, 0x00000000, \
+0x00E014F4, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00E014E0, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00E014E7, 0x00000000, 0x00000000, 0x00000000, \
+0x00141000, 0x015D6F0D, 0x00E002C0, 0x00000000, \
+0x00200600, 0x00E0150D, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x0030FF80, 0x00940E6A, 0x00038200, 0x00102000, \
+0x00E00E67, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00906E65, 0x00800E60, 0x00E00E5D, 0x00000000, \
+0x00300006, 0x00E0151A, 0x00000000, 0x00000000, \
+0x00906F19, 0x00900F19, 0x00E00F14, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x01406CBA, 0x00807FDA, 0x00901FDA, 0x0024FFFF, \
+0x014B6F6F, 0x0030FFFE, 0x01407172, 0x01496FBA, \
+0x014B6F72, 0x00308000, 0x01406C52, 0x00912E89, \
+0x00E00E85, 0x00000000, 0x00000000, 0x00000000 \
+}
+
+/********************************************************/
+/* Micro code for the D102 E-step */
+/********************************************************/
+
+/* Parameter values for the D102 E-step */
+#define D102_E_CPUSAVER_TIMER_DWORD 42
+#define D102_E_CPUSAVER_BUNDLE_DWORD 54
+#define D102_E_CPUSAVER_MIN_SIZE_DWORD 46
+
+#define D102_E_RCVBUNDLE_UCODE \
+{\
+0x007D028F, 0x0E4204F9, 0x14ED0C85, 0x14FA14E9, 0x1FFF1FFF, 0x1FFF1FFF, \
+0x00E014B9, 0x00000000, 0x00000000, 0x00000000, \
+0x00E014BD, 0x00000000, 0x00000000, 0x00000000, \
+0x00E014D5, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00E014C1, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00E014C8, 0x00000000, 0x00000000, 0x00000000, \
+0x00200600, 0x00E014EE, 0x00000000, 0x00000000, \
+0x0030FF80, 0x00940E46, 0x00038200, 0x00102000, \
+0x00E00E43, 0x00000000, 0x00000000, 0x00000000, \
+0x00300006, 0x00E014FB, 0x00000000, 0x00000000 \
+}
+
+#endif /* _E100_UCODE_H_ */
+++ /dev/null
-/* 8390.c: A general NS8390 ethernet driver core for linux. */
-/*
- Written 1992-94 by Donald Becker.
-
- Copyright 1993 United States Government as represented by the
- Director, National Security Agency.
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
- The author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
-
- This is the chip-specific code for many 8390-based ethernet adaptors.
- This is not a complete driver, it must be combined with board-specific
- code such as ne.c, wd.c, 3c503.c, etc.
-
- Seeing how at least eight drivers use this code, (not counting the
- PCMCIA ones either) it is easy to break some card by what seems like
- a simple innocent change. Please contact me or Donald if you think
- you have found something that needs changing. -- PG
-
-
- Changelog:
-
- Paul Gortmaker : remove set_bit lock, other cleanups.
- Paul Gortmaker : add ei_get_8390_hdr() so we can pass skb's to
- ei_block_input() for eth_io_copy_and_sum().
- Paul Gortmaker : exchange static int ei_pingpong for a #define,
- also add better Tx error handling.
- Paul Gortmaker : rewrite Rx overrun handling as per NS specs.
- Alexey Kuznetsov : use the 8390's six bit hash multicast filter.
- Paul Gortmaker : tweak ANK's above multicast changes a bit.
- Paul Gortmaker : update packet statistics for v2.1.x
- Alan Cox : support arbitary stupid port mappings on the
- 68K Macintosh. Support >16bit I/O spaces
- Paul Gortmaker : add kmod support for auto-loading of the 8390
- module by all drivers that require it.
- Alan Cox : Spinlocking work, added 'BUG_83C690'
- Paul Gortmaker : Separate out Tx timeout code from Tx path.
-
- Sources:
- The National Semiconductor LAN Databook, and the 3Com 3c503 databook.
-
- */
-
-static const char version[] =
- "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
-
-#include <xeno/module.h>
-#include <xeno/kernel.h>
-#include <xeno/sched.h>
-//#include <xeno/fs.h>
-#include <xeno/types.h>
-//#include <xeno/ptrace.h>
-#include <xeno/lib.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
-#include <asm/bitops.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <xeno/delay.h>
-#include <xeno/errno.h>
-//#include <xeno/fcntl.h>
-#include <xeno/in.h>
-#include <xeno/interrupt.h>
-#include <xeno/init.h>
-
-#include <xeno/netdevice.h>
-#include <xeno/etherdevice.h>
-
-#define NS8390_CORE
-#include "8390.h"
-
-#define BUG_83C690
-
-/* These are the operational function interfaces to board-specific
- routines.
- void reset_8390(struct net_device *dev)
- Resets the board associated with DEV, including a hardware reset of
- the 8390. This is only called when there is a transmit timeout, and
- it is always followed by 8390_init().
- void block_output(struct net_device *dev, int count, const unsigned char *buf,
- int start_page)
- Write the COUNT bytes of BUF to the packet buffer at START_PAGE. The
- "page" value uses the 8390's 256-byte pages.
- void get_8390_hdr(struct net_device *dev, struct e8390_hdr *hdr, int ring_page)
- Read the 4 byte, page aligned 8390 header. *If* there is a
- subsequent read, it will be of the rest of the packet.
- void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
- Read COUNT bytes from the packet buffer into the skb data area. Start
- reading from RING_OFFSET, the address as the 8390 sees it. This will always
- follow the read of the 8390 header.
-*/
-#define ei_reset_8390 (ei_local->reset_8390)
-#define ei_block_output (ei_local->block_output)
-#define ei_block_input (ei_local->block_input)
-#define ei_get_8390_hdr (ei_local->get_8390_hdr)
-
-/* use 0 for production, 1 for verification, >2 for debug */
-#ifndef ei_debug
-int ei_debug = 1;
-#endif
-
-/* Index to functions. */
-static void ei_tx_intr(struct net_device *dev);
-static void ei_tx_err(struct net_device *dev);
-static void ei_tx_timeout(struct net_device *dev);
-static void ei_receive(struct net_device *dev);
-static void ei_rx_overrun(struct net_device *dev);
-
-/* Routines generic to NS8390-based boards. */
-static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
- int start_page);
-static void set_multicast_list(struct net_device *dev);
-static void do_set_multicast_list(struct net_device *dev);
-
-/*
- * SMP and the 8390 setup.
- *
- * The 8390 isnt exactly designed to be multithreaded on RX/TX. There is
- * a page register that controls bank and packet buffer access. We guard
- * this with ei_local->page_lock. Nobody should assume or set the page other
- * than zero when the lock is not held. Lock holders must restore page 0
- * before unlocking. Even pure readers must take the lock to protect in
- * page 0.
- *
- * To make life difficult the chip can also be very slow. We therefore can't
- * just use spinlocks. For the longer lockups we disable the irq the device
- * sits on and hold the lock. We must hold the lock because there is a dual
- * processor case other than interrupts (get stats/set multicast list in
- * parallel with each other and transmit).
- *
- * Note: in theory we can just disable the irq on the card _but_ there is
- * a latency on SMP irq delivery. So we can easily go "disable irq" "sync irqs"
- * enter lock, take the queued irq. So we waddle instead of flying.
- *
- * Finally by special arrangement for the purpose of being generally
- * annoying the transmit function is called bh atomic. That places
- * restrictions on the user context callers as disable_irq won't save
- * them.
- */
-
-
-\f
-/**
- * ei_open - Open/initialize the board.
- * @dev: network device to initialize
- *
- * This routine goes all-out, setting everything
- * up anew at each open, even though many of these registers should only
- * need to be set once at boot.
- */
-int ei_open(struct net_device *dev)
-{
- unsigned long flags;
- struct ei_device *ei_local = (struct ei_device *) dev->priv;
-
- /* This can't happen unless somebody forgot to call ethdev_init(). */
- if (ei_local == NULL)
- {
- printk(KERN_EMERG "%s: ei_open passed a non-existent device!\n", dev->name);
- return -ENXIO;
- }
-
- /* The card I/O part of the driver (e.g. 3c503) can hook a Tx timeout
- wrapper that does e.g. media check & then calls ei_tx_timeout. */
- if (dev->tx_timeout == NULL)
- dev->tx_timeout = ei_tx_timeout;
- if (dev->watchdog_timeo <= 0)
- dev->watchdog_timeo = TX_TIMEOUT;
-
- /*
- * Grab the page lock so we own the register set, then call
- * the init function.
- */
-
- spin_lock_irqsave(&ei_local->page_lock, flags);
- NS8390_init(dev, 1);
- /* Set the flag before we drop the lock, That way the IRQ arrives
- after its set and we get no silly warnings */
- netif_start_queue(dev);
- spin_unlock_irqrestore(&ei_local->page_lock, flags);
- ei_local->irqlock = 0;
- return 0;
-}
-
-/**
- * ei_close - shut down network device
- * @dev: network device to close
- *
- * Opposite of ei_open(). Only used when "ifconfig <devname> down" is done.
- */
-int ei_close(struct net_device *dev)
-{
- struct ei_device *ei_local = (struct ei_device *) dev->priv;
- unsigned long flags;
-
- /*
- * Hold the page lock during close
- */
-
- spin_lock_irqsave(&ei_local->page_lock, flags);
- NS8390_init(dev, 0);
- spin_unlock_irqrestore(&ei_local->page_lock, flags);
- netif_stop_queue(dev);
- return 0;
-}
-
-/**
- * ei_tx_timeout - handle transmit time out condition
- * @dev: network device which has apparently fallen asleep
- *
- * Called by kernel when device never acknowledges a transmit has
- * completed (or failed) - i.e. never posted a Tx related interrupt.
- */
-
-void ei_tx_timeout(struct net_device *dev)
-{
- long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) dev->priv;
- int txsr, isr, tickssofar = jiffies - dev->trans_start;
- unsigned long flags;
-
- ei_local->stat.tx_errors++;
-
- spin_lock_irqsave(&ei_local->page_lock, flags);
- txsr = inb(e8390_base+EN0_TSR);
- isr = inb(e8390_base+EN0_ISR);
- spin_unlock_irqrestore(&ei_local->page_lock, flags);
-
- printk(KERN_DEBUG "%s: Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n",
- dev->name, (txsr & ENTSR_ABT) ? "excess collisions." :
- (isr) ? "lost interrupt?" : "cable problem?", txsr, isr, tickssofar);
-
- if (!isr && !ei_local->stat.tx_packets)
- {
- /* The 8390 probably hasn't gotten on the cable yet. */
- ei_local->interface_num ^= 1; /* Try a different xcvr. */
- }
-
- /* Ugly but a reset can be slow, yet must be protected */
-
- disable_irq_nosync(dev->irq);
- spin_lock(&ei_local->page_lock);
-
- /* Try to restart the card. Perhaps the user has fixed something. */
- ei_reset_8390(dev);
- NS8390_init(dev, 1);
-
- spin_unlock(&ei_local->page_lock);
- enable_irq(dev->irq);
- netif_wake_queue(dev);
-}
-
-/**
- * ei_start_xmit - begin packet transmission
- * @skb: packet to be sent
- * @dev: network device to which packet is sent
- *
- * Sends a packet to an 8390 network device.
- */
-
-static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) dev->priv;
- int length, send_length, output_page;
- unsigned long flags;
-
- length = skb->len;
-
- /* Mask interrupts from the ethercard.
- SMP: We have to grab the lock here otherwise the IRQ handler
- on another CPU can flip window and race the IRQ mask set. We end
- up trashing the mcast filter not disabling irqs if we dont lock */
-
- spin_lock_irqsave(&ei_local->page_lock, flags);
- outb_p(0x00, e8390_base + EN0_IMR);
- spin_unlock_irqrestore(&ei_local->page_lock, flags);
-
-
- /*
- * Slow phase with lock held.
- */
-
- disable_irq_nosync(dev->irq);
-
- spin_lock(&ei_local->page_lock);
-
- ei_local->irqlock = 1;
-
- send_length = ETH_ZLEN < length ? length : ETH_ZLEN;
-
-#ifdef EI_PINGPONG
-
- /*
- * We have two Tx slots available for use. Find the first free
- * slot, and then perform some sanity checks. With two Tx bufs,
- * you get very close to transmitting back-to-back packets. With
- * only one Tx buf, the transmitter sits idle while you reload the
- * card, leaving a substantial gap between each transmitted packet.
- */
-
- if (ei_local->tx1 == 0)
- {
- output_page = ei_local->tx_start_page;
- ei_local->tx1 = send_length;
- if (ei_debug && ei_local->tx2 > 0)
- printk(KERN_DEBUG "%s: idle transmitter tx2=%d, lasttx=%d, txing=%d.\n",
- dev->name, ei_local->tx2, ei_local->lasttx, ei_local->txing);
- }
- else if (ei_local->tx2 == 0)
- {
- output_page = ei_local->tx_start_page + TX_1X_PAGES;
- ei_local->tx2 = send_length;
- if (ei_debug && ei_local->tx1 > 0)
- printk(KERN_DEBUG "%s: idle transmitter, tx1=%d, lasttx=%d, txing=%d.\n",
- dev->name, ei_local->tx1, ei_local->lasttx, ei_local->txing);
- }
- else
- { /* We should never get here. */
- if (ei_debug)
- printk(KERN_DEBUG "%s: No Tx buffers free! tx1=%d tx2=%d last=%d\n",
- dev->name, ei_local->tx1, ei_local->tx2, ei_local->lasttx);
- ei_local->irqlock = 0;
- netif_stop_queue(dev);
- outb_p(ENISR_ALL, e8390_base + EN0_IMR);
- spin_unlock(&ei_local->page_lock);
- enable_irq(dev->irq);
- ei_local->stat.tx_errors++;
- return 1;
- }
-
- /*
- * Okay, now upload the packet and trigger a send if the transmitter
- * isn't already sending. If it is busy, the interrupt handler will
- * trigger the send later, upon receiving a Tx done interrupt.
- */
-
- ei_block_output(dev, length, skb->data, output_page);
- if (! ei_local->txing)
- {
- ei_local->txing = 1;
- NS8390_trigger_send(dev, send_length, output_page);
- dev->trans_start = jiffies;
- if (output_page == ei_local->tx_start_page)
- {
- ei_local->tx1 = -1;
- ei_local->lasttx = -1;
- }
- else
- {
- ei_local->tx2 = -1;
- ei_local->lasttx = -2;
- }
- }
- else ei_local->txqueue++;
-
- if (ei_local->tx1 && ei_local->tx2)
- netif_stop_queue(dev);
- else
- netif_start_queue(dev);
-
-#else /* EI_PINGPONG */
-
- /*
- * Only one Tx buffer in use. You need two Tx bufs to come close to
- * back-to-back transmits. Expect a 20 -> 25% performance hit on
- * reasonable hardware if you only use one Tx buffer.
- */
-
- ei_block_output(dev, length, skb->data, ei_local->tx_start_page);
- ei_local->txing = 1;
- NS8390_trigger_send(dev, send_length, ei_local->tx_start_page);
- dev->trans_start = jiffies;
- netif_stop_queue(dev);
-
-#endif /* EI_PINGPONG */
-
- /* Turn 8390 interrupts back on. */
- ei_local->irqlock = 0;
- outb_p(ENISR_ALL, e8390_base + EN0_IMR);
-
- spin_unlock(&ei_local->page_lock);
- enable_irq(dev->irq);
-
- dev_kfree_skb (skb);
- ei_local->stat.tx_bytes += send_length;
-
- return 0;
-}
-\f
-/**
- * ei_interrupt - handle the interrupts from an 8390
- * @irq: interrupt number
- * @dev_id: a pointer to the net_device
- * @regs: unused
- *
- * Handle the ether interface interrupts. We pull packets from
- * the 8390 via the card specific functions and fire them at the networking
- * stack. We also handle transmit completions and wake the transmit path if
- * neccessary. We also update the counters and do other housekeeping as
- * needed.
- */
-
-void ei_interrupt(int irq, void *dev_id, struct pt_regs * regs)
-{
- struct net_device *dev = dev_id;
- long e8390_base;
- int interrupts, nr_serviced = 0;
- struct ei_device *ei_local;
-
- if (dev == NULL)
- {
- printk ("net_interrupt(): irq %d for unknown device.\n", irq);
- return;
- }
-
- e8390_base = dev->base_addr;
- ei_local = (struct ei_device *) dev->priv;
-
- /*
- * Protect the irq test too.
- */
-
- spin_lock(&ei_local->page_lock);
-
- if (ei_local->irqlock)
- {
-#if 1 /* This might just be an interrupt for a PCI device sharing this line */
- /* The "irqlock" check is only for testing. */
- printk(ei_local->irqlock
- ? "%s: Interrupted while interrupts are masked! isr=%#2x imr=%#2x.\n"
- : "%s: Reentering the interrupt handler! isr=%#2x imr=%#2x.\n",
- dev->name, inb_p(e8390_base + EN0_ISR),
- inb_p(e8390_base + EN0_IMR));
-#endif
- spin_unlock(&ei_local->page_lock);
- return;
- }
-
- /* Change to page 0 and read the intr status reg. */
- outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
- if (ei_debug > 3)
- printk(KERN_DEBUG "%s: interrupt(isr=%#2.2x).\n", dev->name,
- inb_p(e8390_base + EN0_ISR));
-
- /* !!Assumption!! -- we stay in page 0. Don't break this. */
- while ((interrupts = inb_p(e8390_base + EN0_ISR)) != 0
- && ++nr_serviced < MAX_SERVICE)
- {
- if (!netif_running(dev)) {
- printk(KERN_WARNING "%s: interrupt from stopped card\n", dev->name);
- /* rmk - acknowledge the interrupts */
- outb_p(interrupts, e8390_base + EN0_ISR);
- interrupts = 0;
- break;
- }
- if (interrupts & ENISR_OVER)
- ei_rx_overrun(dev);
- else if (interrupts & (ENISR_RX+ENISR_RX_ERR))
- {
- /* Got a good (?) packet. */
- ei_receive(dev);
- }
- /* Push the next to-transmit packet through. */
- if (interrupts & ENISR_TX)
- ei_tx_intr(dev);
- else if (interrupts & ENISR_TX_ERR)
- ei_tx_err(dev);
-
- if (interrupts & ENISR_COUNTERS)
- {
- ei_local->stat.rx_frame_errors += inb_p(e8390_base + EN0_COUNTER0);
- ei_local->stat.rx_crc_errors += inb_p(e8390_base + EN0_COUNTER1);
- ei_local->stat.rx_missed_errors+= inb_p(e8390_base + EN0_COUNTER2);
- outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */
- }
-
- /* Ignore any RDC interrupts that make it back to here. */
- if (interrupts & ENISR_RDC)
- {
- outb_p(ENISR_RDC, e8390_base + EN0_ISR);
- }
-
- outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
- }
-
- if (interrupts && ei_debug)
- {
- outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
- if (nr_serviced >= MAX_SERVICE)
- {
- /* 0xFF is valid for a card removal */
- if(interrupts!=0xFF)
- printk(KERN_WARNING "%s: Too much work at interrupt, status %#2.2x\n",
- dev->name, interrupts);
- outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
- } else {
- printk(KERN_WARNING "%s: unknown interrupt %#2x\n", dev->name, interrupts);
- outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */
- }
- }
- spin_unlock(&ei_local->page_lock);
- return;
-}
-
-/**
- * ei_tx_err - handle transmitter error
- * @dev: network device which threw the exception
- *
- * A transmitter error has happened. Most likely excess collisions (which
- * is a fairly normal condition). If the error is one where the Tx will
- * have been aborted, we try and send another one right away, instead of
- * letting the failed packet sit and collect dust in the Tx buffer. This
- * is a much better solution as it avoids kernel based Tx timeouts, and
- * an unnecessary card reset.
- *
- * Called with lock held.
- */
-
-static void ei_tx_err(struct net_device *dev)
-{
- long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) dev->priv;
- unsigned char txsr = inb_p(e8390_base+EN0_TSR);
- unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);
-
-#ifdef VERBOSE_ERROR_DUMP
- printk(KERN_DEBUG "%s: transmitter error (%#2x): ", dev->name, txsr);
- if (txsr & ENTSR_ABT)
- printk("excess-collisions ");
- if (txsr & ENTSR_ND)
- printk("non-deferral ");
- if (txsr & ENTSR_CRS)
- printk("lost-carrier ");
- if (txsr & ENTSR_FU)
- printk("FIFO-underrun ");
- if (txsr & ENTSR_CDH)
- printk("lost-heartbeat ");
- printk("\n");
-#endif
-
- outb_p(ENISR_TX_ERR, e8390_base + EN0_ISR); /* Ack intr. */
-
- if (tx_was_aborted)
- ei_tx_intr(dev);
- else
- {
- ei_local->stat.tx_errors++;
- if (txsr & ENTSR_CRS) ei_local->stat.tx_carrier_errors++;
- if (txsr & ENTSR_CDH) ei_local->stat.tx_heartbeat_errors++;
- if (txsr & ENTSR_OWC) ei_local->stat.tx_window_errors++;
- }
-}
-
-/**
- * ei_tx_intr - transmit interrupt handler
- * @dev: network device for which tx intr is handled
- *
- * We have finished a transmit: check for errors and then trigger the next
- * packet to be sent. Called with lock held.
- */
-
-static void ei_tx_intr(struct net_device *dev)
-{
- long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) dev->priv;
- int status = inb(e8390_base + EN0_TSR);
-
- outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */
-
-#ifdef EI_PINGPONG
-
- /*
- * There are two Tx buffers, see which one finished, and trigger
- * the send of another one if it exists.
- */
- ei_local->txqueue--;
-
- if (ei_local->tx1 < 0)
- {
- if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
- printk(KERN_ERR "%s: bogus last_tx_buffer %d, tx1=%d.\n",
- ei_local->name, ei_local->lasttx, ei_local->tx1);
- ei_local->tx1 = 0;
- if (ei_local->tx2 > 0)
- {
- ei_local->txing = 1;
- NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
- dev->trans_start = jiffies;
- ei_local->tx2 = -1,
- ei_local->lasttx = 2;
- }
- else ei_local->lasttx = 20, ei_local->txing = 0;
- }
- else if (ei_local->tx2 < 0)
- {
- if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
- printk("%s: bogus last_tx_buffer %d, tx2=%d.\n",
- ei_local->name, ei_local->lasttx, ei_local->tx2);
- ei_local->tx2 = 0;
- if (ei_local->tx1 > 0)
- {
- ei_local->txing = 1;
- NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
- dev->trans_start = jiffies;
- ei_local->tx1 = -1;
- ei_local->lasttx = 1;
- }
- else
- ei_local->lasttx = 10, ei_local->txing = 0;
- }
-// else printk(KERN_WARNING "%s: unexpected TX-done interrupt, lasttx=%d.\n",
-// dev->name, ei_local->lasttx);
-
-#else /* EI_PINGPONG */
- /*
- * Single Tx buffer: mark it free so another packet can be loaded.
- */
- ei_local->txing = 0;
-#endif
-
- /* Minimize Tx latency: update the statistics after we restart TXing. */
- if (status & ENTSR_COL)
- ei_local->stat.collisions++;
- if (status & ENTSR_PTX)
- ei_local->stat.tx_packets++;
- else
- {
- ei_local->stat.tx_errors++;
- if (status & ENTSR_ABT)
- {
- ei_local->stat.tx_aborted_errors++;
- ei_local->stat.collisions += 16;
- }
- if (status & ENTSR_CRS)
- ei_local->stat.tx_carrier_errors++;
- if (status & ENTSR_FU)
- ei_local->stat.tx_fifo_errors++;
- if (status & ENTSR_CDH)
- ei_local->stat.tx_heartbeat_errors++;
- if (status & ENTSR_OWC)
- ei_local->stat.tx_window_errors++;
- }
- netif_wake_queue(dev);
-}
-
-/**
- * ei_receive - receive some packets
- * @dev: network device with which receive will be run
- *
- * We have a good packet(s), get it/them out of the buffers.
- * Called with lock held.
- */
-
-static void ei_receive(struct net_device *dev)
-{
- long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) dev->priv;
- unsigned char rxing_page, this_frame, next_frame;
- unsigned short current_offset;
- int rx_pkt_count = 0;
- struct e8390_pkt_hdr rx_frame;
- int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page;
-
- while (++rx_pkt_count < 10)
- {
- int pkt_len, pkt_stat;
-
- /* Get the rx page (incoming packet pointer). */
- outb_p(E8390_NODMA+E8390_PAGE1, e8390_base + E8390_CMD);
- rxing_page = inb_p(e8390_base + EN1_CURPAG);
- outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
-
- /* Remove one frame from the ring. Boundary is always a page behind. */
- this_frame = inb_p(e8390_base + EN0_BOUNDARY) + 1;
- if (this_frame >= ei_local->stop_page)
- this_frame = ei_local->rx_start_page;
-
- /* Someday we'll omit the previous, iff we never get this message.
- (There is at least one clone claimed to have a problem.)
-
- Keep quiet if it looks like a card removal. One problem here
- is that some clones crash in roughly the same way.
- */
- if (ei_debug > 0 && this_frame != ei_local->current_page && (this_frame!=0x0 || rxing_page!=0xFF))
- printk(KERN_ERR "%s: mismatched read page pointers %2x vs %2x.\n",
- dev->name, this_frame, ei_local->current_page);
-
- if (this_frame == rxing_page) /* Read all the frames? */
- break; /* Done for now */
-
- current_offset = this_frame << 8;
- ei_get_8390_hdr(dev, &rx_frame, this_frame);
-
- pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr);
- pkt_stat = rx_frame.status;
-
- next_frame = this_frame + 1 + ((pkt_len+4)>>8);
-
- /* Check for bogosity warned by 3c503 book: the status byte is never
- written. This happened a lot during testing! This code should be
- cleaned up someday. */
- if (rx_frame.next != next_frame
- && rx_frame.next != next_frame + 1
- && rx_frame.next != next_frame - num_rx_pages
- && rx_frame.next != next_frame + 1 - num_rx_pages) {
- ei_local->current_page = rxing_page;
- outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY);
- ei_local->stat.rx_errors++;
- continue;
- }
-
- if (pkt_len < 60 || pkt_len > 1518)
- {
- if (ei_debug)
- printk(KERN_DEBUG "%s: bogus packet size: %d, status=%#2x nxpg=%#2x.\n",
- dev->name, rx_frame.count, rx_frame.status,
- rx_frame.next);
- ei_local->stat.rx_errors++;
- ei_local->stat.rx_length_errors++;
- }
- else if ((pkt_stat & 0x0F) == ENRSR_RXOK)
- {
- struct sk_buff *skb;
-
- skb = dev_alloc_skb(pkt_len+2);
- if (skb == NULL)
- {
- if (ei_debug > 1)
- printk(KERN_DEBUG "%s: Couldn't allocate a sk_buff of size %d.\n",
- dev->name, pkt_len);
- ei_local->stat.rx_dropped++;
- break;
- }
- else
- {
- skb_reserve(skb,2); /* IP headers on 16 byte boundaries */
- skb->dev = dev;
- skb_put(skb, pkt_len); /* Make room */
- ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
- skb->protocol=eth_type_trans(skb,dev);
- netif_rx(skb);
- dev->last_rx = jiffies;
- ei_local->stat.rx_packets++;
- ei_local->stat.rx_bytes += pkt_len;
- if (pkt_stat & ENRSR_PHY)
- ei_local->stat.multicast++;
- }
- }
- else
- {
- if (ei_debug)
- printk(KERN_DEBUG "%s: bogus packet: status=%#2x nxpg=%#2x size=%d\n",
- dev->name, rx_frame.status, rx_frame.next,
- rx_frame.count);
- ei_local->stat.rx_errors++;
- /* NB: The NIC counts CRC, frame and missed errors. */
- if (pkt_stat & ENRSR_FO)
- ei_local->stat.rx_fifo_errors++;
- }
- next_frame = rx_frame.next;
-
- /* This _should_ never happen: it's here for avoiding bad clones. */
- if (next_frame >= ei_local->stop_page) {
- printk("%s: next frame inconsistency, %#2x\n", dev->name,
- next_frame);
- next_frame = ei_local->rx_start_page;
- }
- ei_local->current_page = next_frame;
- outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
- }
-
- /* We used to also ack ENISR_OVER here, but that would sometimes mask
- a real overrun, leaving the 8390 in a stopped state with rec'vr off. */
- outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR);
- return;
-}
-
-/**
- * ei_rx_overrun - handle receiver overrun
- * @dev: network device which threw exception
- *
- * We have a receiver overrun: we have to kick the 8390 to get it started
- * again. Problem is that you have to kick it exactly as NS prescribes in
- * the updated datasheets, or "the NIC may act in an unpredictable manner."
- * This includes causing "the NIC to defer indefinitely when it is stopped
- * on a busy network." Ugh.
- * Called with lock held. Don't call this with the interrupts off or your
- * computer will hate you - it takes 10ms or so.
- */
-
-static void ei_rx_overrun(struct net_device *dev)
-{
- long e8390_base = dev->base_addr;
- unsigned char was_txing, must_resend = 0;
- struct ei_device *ei_local = (struct ei_device *) dev->priv;
-
- /*
- * Record whether a Tx was in progress and then issue the
- * stop command.
- */
- was_txing = inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
- outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
-
- if (ei_debug > 1)
- printk(KERN_DEBUG "%s: Receiver overrun.\n", dev->name);
- ei_local->stat.rx_over_errors++;
-
- /*
- * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
- * Early datasheets said to poll the reset bit, but now they say that
- * it "is not a reliable indicator and subsequently should be ignored."
- * We wait at least 10ms.
- */
-
- udelay(10*1000);
-
- /*
- * Reset RBCR[01] back to zero as per magic incantation.
- */
- outb_p(0x00, e8390_base+EN0_RCNTLO);
- outb_p(0x00, e8390_base+EN0_RCNTHI);
-
- /*
- * See if any Tx was interrupted or not. According to NS, this
- * step is vital, and skipping it will cause no end of havoc.
- */
-
- if (was_txing)
- {
- unsigned char tx_completed = inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR);
- if (!tx_completed)
- must_resend = 1;
- }
-
- /*
- * Have to enter loopback mode and then restart the NIC before
- * you are allowed to slurp packets up off the ring.
- */
- outb_p(E8390_TXOFF, e8390_base + EN0_TXCR);
- outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD);
-
- /*
- * Clear the Rx ring of all the debris, and ack the interrupt.
- */
- ei_receive(dev);
- outb_p(ENISR_OVER, e8390_base+EN0_ISR);
-
- /*
- * Leave loopback mode, and resend any packet that got stopped.
- */
- outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR);
- if (must_resend)
- outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD);
-}
-
-/*
- * Collect the stats. This is called unlocked and from several contexts.
- */
-
-static struct net_device_stats *get_stats(struct net_device *dev)
-{
- long ioaddr = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) dev->priv;
- unsigned long flags;
-
- /* If the card is stopped, just return the present stats. */
- if (!netif_running(dev))
- return &ei_local->stat;
-
- spin_lock_irqsave(&ei_local->page_lock,flags);
- /* Read the counter registers, assuming we are in page 0. */
- ei_local->stat.rx_frame_errors += inb_p(ioaddr + EN0_COUNTER0);
- ei_local->stat.rx_crc_errors += inb_p(ioaddr + EN0_COUNTER1);
- ei_local->stat.rx_missed_errors+= inb_p(ioaddr + EN0_COUNTER2);
- spin_unlock_irqrestore(&ei_local->page_lock, flags);
-
- return &ei_local->stat;
-}
-
-/*
- * Update the given Autodin II CRC value with another data byte.
- */
-
-static inline u32 update_crc(u8 byte, u32 current_crc)
-{
- int bit;
- u8 ah = 0;
- for (bit=0; bit<8; bit++)
- {
- u8 carry = (current_crc>>31);
- current_crc <<= 1;
- ah = ((ah<<1) | carry) ^ byte;
- if (ah&1)
- current_crc ^= 0x04C11DB7; /* CRC polynomial */
- ah >>= 1;
- byte >>= 1;
- }
- return current_crc;
-}
-
-/*
- * Form the 64 bit 8390 multicast table from the linked list of addresses
- * associated with this dev structure.
- */
-
-static inline void make_mc_bits(u8 *bits, struct net_device *dev)
-{
- struct dev_mc_list *dmi;
-
- for (dmi=dev->mc_list; dmi; dmi=dmi->next)
- {
- int i;
- u32 crc;
- if (dmi->dmi_addrlen != ETH_ALEN)
- {
- printk(KERN_INFO "%s: invalid multicast address length given.\n", dev->name);
- continue;
- }
- crc = 0xffffffff; /* initial CRC value */
- for (i=0; i<ETH_ALEN; i++)
- crc = update_crc(dmi->dmi_addr[i], crc);
- /*
- * The 8390 uses the 6 most significant bits of the
- * CRC to index the multicast table.
- */
- bits[crc>>29] |= (1<<((crc>>26)&7));
- }
-}
-
-/**
- * do_set_multicast_list - set/clear multicast filter
- * @dev: net device for which multicast filter is adjusted
- *
- * Set or clear the multicast filter for this adaptor. May be called
- * from a BH in 2.1.x. Must be called with lock held.
- */
-
-static void do_set_multicast_list(struct net_device *dev)
-{
- long e8390_base = dev->base_addr;
- int i;
- struct ei_device *ei_local = (struct ei_device*)dev->priv;
-
- if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI)))
- {
- memset(ei_local->mcfilter, 0, 8);
- if (dev->mc_list)
- make_mc_bits(ei_local->mcfilter, dev);
- }
- else
- memset(ei_local->mcfilter, 0xFF, 8); /* mcast set to accept-all */
-
- /*
- * DP8390 manuals don't specify any magic sequence for altering
- * the multicast regs on an already running card. To be safe, we
- * ensure multicast mode is off prior to loading up the new hash
- * table. If this proves to be not enough, we can always resort
- * to stopping the NIC, loading the table and then restarting.
- *
- * Bug Alert! The MC regs on the SMC 83C690 (SMC Elite and SMC
- * Elite16) appear to be write-only. The NS 8390 data sheet lists
- * them as r/w so this is a bug. The SMC 83C790 (SMC Ultra and
- * Ultra32 EISA) appears to have this bug fixed.
- */
-
- if (netif_running(dev))
- outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
- outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
- for(i = 0; i < 8; i++)
- {
- outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i));
-#ifndef BUG_83C690
- if(inb_p(e8390_base + EN1_MULT_SHIFT(i))!=ei_local->mcfilter[i])
- printk(KERN_ERR "Multicast filter read/write mismap %d\n",i);
-#endif
- }
- outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD);
-
- if(dev->flags&IFF_PROMISC)
- outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR);
- else if(dev->flags&IFF_ALLMULTI || dev->mc_list)
- outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR);
- else
- outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
- }
-
-/*
- * Called without lock held. This is invoked from user context and may
- * be parallel to just about everything else. Its also fairly quick and
- * not called too often. Must protect against both bh and irq users
- */
-
-static void set_multicast_list(struct net_device *dev)
-{
- unsigned long flags;
- struct ei_device *ei_local = (struct ei_device*)dev->priv;
-
- spin_lock_irqsave(&ei_local->page_lock, flags);
- do_set_multicast_list(dev);
- spin_unlock_irqrestore(&ei_local->page_lock, flags);
-}
-
-/**
- * ethdev_init - init rest of 8390 device struct
- * @dev: network device structure to init
- *
- * Initialize the rest of the 8390 device structure. Do NOT __init
- * this, as it is used by 8390 based modular drivers too.
- */
-
-int ethdev_init(struct net_device *dev)
-{
- if (ei_debug > 1)
- printk(version);
-
- if (dev->priv == NULL)
- {
- struct ei_device *ei_local;
-
- dev->priv = kmalloc(sizeof(struct ei_device), GFP_KERNEL);
- if (dev->priv == NULL)
- return -ENOMEM;
- memset(dev->priv, 0, sizeof(struct ei_device));
- ei_local = (struct ei_device *)dev->priv;
- spin_lock_init(&ei_local->page_lock);
- }
-
- dev->hard_start_xmit = &ei_start_xmit;
- dev->get_stats = get_stats;
- dev->set_multicast_list = &set_multicast_list;
-
- ether_setup(dev);
-
- return 0;
-}
-\f
-
-
-/* This page of functions should be 8390 generic */
-/* Follow National Semi's recommendations for initializing the "NIC". */
-
-/**
- * NS8390_init - initialize 8390 hardware
- * @dev: network device to initialize
- * @startp: boolean. non-zero value to initiate chip processing
- *
- * Must be called with lock held.
- */
-
-void NS8390_init(struct net_device *dev, int startp)
-{
- long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) dev->priv;
- int i;
- int endcfg = ei_local->word16
- ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
- : 0x48;
-
- if(sizeof(struct e8390_pkt_hdr)!=4)
- panic("8390.c: header struct mispacked\n");
- /* Follow National Semi's recommendations for initing the DP83902. */
- outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */
- outb_p(endcfg, e8390_base + EN0_DCFG); /* 0x48 or 0x49 */
- /* Clear the remote byte count registers. */
- outb_p(0x00, e8390_base + EN0_RCNTLO);
- outb_p(0x00, e8390_base + EN0_RCNTHI);
- /* Set to monitor and loopback mode -- this is vital!. */
- outb_p(E8390_RXOFF, e8390_base + EN0_RXCR); /* 0x20 */
- outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */
- /* Set the transmit page and receive ring. */
- outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR);
- ei_local->tx1 = ei_local->tx2 = 0;
- outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG);
- outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY); /* 3c503 says 0x3f,NS0x26*/
- ei_local->current_page = ei_local->rx_start_page; /* assert boundary+1 */
- outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG);
- /* Clear the pending interrupts and mask. */
- outb_p(0xFF, e8390_base + EN0_ISR);
- outb_p(0x00, e8390_base + EN0_IMR);
-
- /* Copy the station address into the DS8390 registers. */
-
- outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */
- for(i = 0; i < 6; i++)
- {
- outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
- if(inb_p(e8390_base + EN1_PHYS_SHIFT(i))!=dev->dev_addr[i])
- printk(KERN_ERR "Hw. address read/write mismap %d\n",i);
- }
-
- outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
- outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
-
- netif_start_queue(dev);
- ei_local->tx1 = ei_local->tx2 = 0;
- ei_local->txing = 0;
-
- if (startp)
- {
- outb_p(0xff, e8390_base + EN0_ISR);
- outb_p(ENISR_ALL, e8390_base + EN0_IMR);
- outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
- outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); /* xmit on. */
- /* 3c503 TechMan says rxconfig only after the NIC is started. */
- outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); /* rx on, */
- do_set_multicast_list(dev); /* (re)load the mcast table */
- }
-}
-
-/* Trigger a transmit start, assuming the length is valid.
- Always called with the page lock held */
-
-static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
- int start_page)
-{
- long e8390_base = dev->base_addr;
- struct ei_device *ei_local __attribute((unused)) = (struct ei_device *) dev->priv;
-
- outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);
-
- if (inb_p(e8390_base) & E8390_TRANS)
- {
- printk(KERN_WARNING "%s: trigger_send() called with the transmitter busy.\n",
- dev->name);
- return;
- }
- outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
- outb_p(length >> 8, e8390_base + EN0_TCNTHI);
- outb_p(start_page, e8390_base + EN0_TPSR);
- outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD);
-}
-
-EXPORT_SYMBOL(ei_open);
-EXPORT_SYMBOL(ei_close);
-EXPORT_SYMBOL(ei_interrupt);
-EXPORT_SYMBOL(ei_tx_timeout);
-EXPORT_SYMBOL(ethdev_init);
-EXPORT_SYMBOL(NS8390_init);
-
-#if defined(MODULE)
-
-int init_module(void)
-{
- return 0;
-}
-
-void cleanup_module(void)
-{
-}
-
-#endif /* MODULE */
+++ /dev/null
-/* Generic NS8390 register definitions. */
-/* This file is part of Donald Becker's 8390 drivers, and is distributed
- under the same license. Auto-loading of 8390.o only in v2.2 - Paul G.
- Some of these names and comments originated from the Crynwr
- packet drivers, which are distributed under the GPL. */
-
-#ifndef _8390_h
-#define _8390_h
-
-#include <xeno/config.h>
-#include <xeno/if_ether.h>
-#include <xeno/ioport.h>
-#include <xeno/skbuff.h>
-
-#define TX_2X_PAGES 12
-#define TX_1X_PAGES 6
-
-/* Should always use two Tx slots to get back-to-back transmits. */
-#define EI_PINGPONG
-
-#ifdef EI_PINGPONG
-#define TX_PAGES TX_2X_PAGES
-#else
-#define TX_PAGES TX_1X_PAGES
-#endif
-
-#define ETHER_ADDR_LEN 6
-
-/* The 8390 specific per-packet-header format. */
-struct e8390_pkt_hdr {
- unsigned char status; /* status */
- unsigned char next; /* pointer to next packet. */
- unsigned short count; /* header + packet length in bytes */
-};
-
-#ifdef notdef
-extern int ei_debug;
-#else
-#define ei_debug 1
-#endif
-
-#ifndef HAVE_AUTOIRQ
-/* From auto_irq.c */
-extern void autoirq_setup(int waittime);
-extern unsigned long autoirq_report(int waittime);
-#endif
-
-extern int ethdev_init(struct net_device *dev);
-extern void NS8390_init(struct net_device *dev, int startp);
-extern int ei_open(struct net_device *dev);
-extern int ei_close(struct net_device *dev);
-extern void ei_interrupt(int irq, void *dev_id, struct pt_regs *regs);
-
-/* Most of these entries should be in 'struct net_device' (or most of the
- things in there should be here!) */
-/* You have one of these per-board */
-struct ei_device {
- const char *name;
- void (*reset_8390)(struct net_device *);
- void (*get_8390_hdr)(struct net_device *, struct e8390_pkt_hdr *, int);
- void (*block_output)(struct net_device *, int, const unsigned char *, int);
- void (*block_input)(struct net_device *, int, struct sk_buff *, int);
- unsigned char mcfilter[8];
- unsigned open:1;
- unsigned word16:1; /* We have the 16-bit (vs 8-bit) version of the card. */
- unsigned bigendian:1; /* 16-bit big endian mode. Do NOT */
- /* set this on random 8390 clones! */
- unsigned txing:1; /* Transmit Active */
- unsigned irqlock:1; /* 8390's intrs disabled when '1'. */
- unsigned dmaing:1; /* Remote DMA Active */
- unsigned char tx_start_page, rx_start_page, stop_page;
- unsigned char current_page; /* Read pointer in buffer */
- unsigned char interface_num; /* Net port (AUI, 10bT.) to use. */
- unsigned char txqueue; /* Tx Packet buffer queue length. */
- short tx1, tx2; /* Packet lengths for ping-pong tx. */
- short lasttx; /* Alpha version consistency check. */
- unsigned char reg0; /* Register '0' in a WD8013 */
- unsigned char reg5; /* Register '5' in a WD8013 */
- unsigned char saved_irq; /* Original dev->irq value. */
- struct net_device_stats stat; /* The new statistics table. */
- u32 *reg_offset; /* Register mapping table */
- spinlock_t page_lock; /* Page register locks */
- unsigned long priv; /* Private field to store bus IDs etc. */
-};
-
-/* The maximum number of 8390 interrupt service routines called per IRQ. */
-#define MAX_SERVICE 12
-
-/* The maximum time waited (in jiffies) before assuming a Tx failed. (20ms) */
-#define TX_TIMEOUT (20*HZ/100)
-
-#define ei_status (*(struct ei_device *)(dev->priv))
-
-/* Some generic ethernet register configurations. */
-#define E8390_TX_IRQ_MASK 0xa /* For register EN0_ISR */
-#define E8390_RX_IRQ_MASK 0x5
-#define E8390_RXCONFIG 0x4 /* EN0_RXCR: broadcasts, no multicast,errors */
-#define E8390_RXOFF 0x20 /* EN0_RXCR: Accept no packets */
-#define E8390_TXCONFIG 0x00 /* EN0_TXCR: Normal transmit mode */
-#define E8390_TXOFF 0x02 /* EN0_TXCR: Transmitter off */
-
-/* Register accessed at EN_CMD, the 8390 base addr. */
-#define E8390_STOP 0x01 /* Stop and reset the chip */
-#define E8390_START 0x02 /* Start the chip, clear reset */
-#define E8390_TRANS 0x04 /* Transmit a frame */
-#define E8390_RREAD 0x08 /* Remote read */
-#define E8390_RWRITE 0x10 /* Remote write */
-#define E8390_NODMA 0x20 /* Remote DMA */
-#define E8390_PAGE0 0x00 /* Select page chip registers */
-#define E8390_PAGE1 0x40 /* using the two high-order bits */
-#define E8390_PAGE2 0x80 /* Page 3 is invalid. */
-
-/*
- * Only generate indirect loads given a machine that needs them.
- */
-
-#if defined(CONFIG_MAC) || defined(CONFIG_AMIGA_PCMCIA) || \
- defined(CONFIG_ARIADNE2) || defined(CONFIG_ARIADNE2_MODULE) || \
- defined(CONFIG_HYDRA) || defined(CONFIG_HYDRA_MODULE) || \
- defined(CONFIG_ARM_ETHERH) || defined(CONFIG_ARM_ETHERH_MODULE)
-#define EI_SHIFT(x) (ei_local->reg_offset[x])
-#else
-#define EI_SHIFT(x) (x)
-#endif
-
-#define E8390_CMD EI_SHIFT(0x00) /* The command register (for all pages) */
-/* Page 0 register offsets. */
-#define EN0_CLDALO EI_SHIFT(0x01) /* Low byte of current local dma addr RD */
-#define EN0_STARTPG EI_SHIFT(0x01) /* Starting page of ring bfr WR */
-#define EN0_CLDAHI EI_SHIFT(0x02) /* High byte of current local dma addr RD */
-#define EN0_STOPPG EI_SHIFT(0x02) /* Ending page +1 of ring bfr WR */
-#define EN0_BOUNDARY EI_SHIFT(0x03) /* Boundary page of ring bfr RD WR */
-#define EN0_TSR EI_SHIFT(0x04) /* Transmit status reg RD */
-#define EN0_TPSR EI_SHIFT(0x04) /* Transmit starting page WR */
-#define EN0_NCR EI_SHIFT(0x05) /* Number of collision reg RD */
-#define EN0_TCNTLO EI_SHIFT(0x05) /* Low byte of tx byte count WR */
-#define EN0_FIFO EI_SHIFT(0x06) /* FIFO RD */
-#define EN0_TCNTHI EI_SHIFT(0x06) /* High byte of tx byte count WR */
-#define EN0_ISR EI_SHIFT(0x07) /* Interrupt status reg RD WR */
-#define EN0_CRDALO EI_SHIFT(0x08) /* low byte of current remote dma address RD */
-#define EN0_RSARLO EI_SHIFT(0x08) /* Remote start address reg 0 */
-#define EN0_CRDAHI EI_SHIFT(0x09) /* high byte, current remote dma address RD */
-#define EN0_RSARHI EI_SHIFT(0x09) /* Remote start address reg 1 */
-#define EN0_RCNTLO EI_SHIFT(0x0a) /* Remote byte count reg WR */
-#define EN0_RCNTHI EI_SHIFT(0x0b) /* Remote byte count reg WR */
-#define EN0_RSR EI_SHIFT(0x0c) /* rx status reg RD */
-#define EN0_RXCR EI_SHIFT(0x0c) /* RX configuration reg WR */
-#define EN0_TXCR EI_SHIFT(0x0d) /* TX configuration reg WR */
-#define EN0_COUNTER0 EI_SHIFT(0x0d) /* Rcv alignment error counter RD */
-#define EN0_DCFG EI_SHIFT(0x0e) /* Data configuration reg WR */
-#define EN0_COUNTER1 EI_SHIFT(0x0e) /* Rcv CRC error counter RD */
-#define EN0_IMR EI_SHIFT(0x0f) /* Interrupt mask reg WR */
-#define EN0_COUNTER2 EI_SHIFT(0x0f) /* Rcv missed frame error counter RD */
-
-/* Bits in EN0_ISR - Interrupt status register */
-#define ENISR_RX 0x01 /* Receiver, no error */
-#define ENISR_TX 0x02 /* Transmitter, no error */
-#define ENISR_RX_ERR 0x04 /* Receiver, with error */
-#define ENISR_TX_ERR 0x08 /* Transmitter, with error */
-#define ENISR_OVER 0x10 /* Receiver overwrote the ring */
-#define ENISR_COUNTERS 0x20 /* Counters need emptying */
-#define ENISR_RDC 0x40 /* remote dma complete */
-#define ENISR_RESET 0x80 /* Reset completed */
-#define ENISR_ALL 0x3f /* Interrupts we will enable */
-
-/* Bits in EN0_DCFG - Data config register */
-#define ENDCFG_WTS 0x01 /* word transfer mode selection */
-#define ENDCFG_BOS 0x02 /* byte order selection */
-
-/* Page 1 register offsets. */
-#define EN1_PHYS EI_SHIFT(0x01) /* This board's physical enet addr RD WR */
-#define EN1_PHYS_SHIFT(i) EI_SHIFT(i+1) /* Get and set mac address */
-#define EN1_CURPAG EI_SHIFT(0x07) /* Current memory page RD WR */
-#define EN1_MULT EI_SHIFT(0x08) /* Multicast filter mask array (8 bytes) RD WR */
-#define EN1_MULT_SHIFT(i) EI_SHIFT(8+i) /* Get and set multicast filter */
-
-/* Bits in received packet status byte and EN0_RSR*/
-#define ENRSR_RXOK 0x01 /* Received a good packet */
-#define ENRSR_CRC 0x02 /* CRC error */
-#define ENRSR_FAE 0x04 /* frame alignment error */
-#define ENRSR_FO 0x08 /* FIFO overrun */
-#define ENRSR_MPA 0x10 /* missed pkt */
-#define ENRSR_PHY 0x20 /* physical/multicast address */
-#define ENRSR_DIS 0x40 /* receiver disable. set in monitor mode */
-#define ENRSR_DEF 0x80 /* deferring */
-
-/* Transmitted packet status, EN0_TSR. */
-#define ENTSR_PTX 0x01 /* Packet transmitted without error */
-#define ENTSR_ND 0x02 /* The transmit wasn't deferred. */
-#define ENTSR_COL 0x04 /* The transmit collided at least once. */
-#define ENTSR_ABT 0x08 /* The transmit collided 16 times, and was deferred. */
-#define ENTSR_CRS 0x10 /* The carrier sense was lost. */
-#define ENTSR_FU 0x20 /* A "FIFO underrun" occurred during transmit. */
-#define ENTSR_CDH 0x40 /* The collision detect "heartbeat" signal was lost. */
-#define ENTSR_OWC 0x80 /* There was an out-of-window collision. */
-
-#endif /* _8390_h */
+++ /dev/null
-
-include $(BASEDIR)/Rules.mk
-
-default: $(OBJS)
- $(LD) -r -o ne_drv.o $(OBJS)
-
-clean:
- rm -f *.o *~ core
+++ /dev/null
-/* ne.c: A general non-shared-memory NS8390 ethernet driver for linux. */
-/*
- Written 1992-94 by Donald Becker.
-
- Copyright 1993 United States Government as represented by the
- Director, National Security Agency.
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
- The author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
-
- This driver should work with many programmed-I/O 8390-based ethernet
- boards. Currently it supports the NE1000, NE2000, many clones,
- and some Cabletron products.
-
- Changelog:
-
- Paul Gortmaker : use ENISR_RDC to monitor Tx PIO uploads, made
- sanity checks and bad clone support optional.
- Paul Gortmaker : new reset code, reset card after probe at boot.
- Paul Gortmaker : multiple card support for module users.
- Paul Gortmaker : Support for PCI ne2k clones, similar to lance.c
- Paul Gortmaker : Allow users with bad cards to avoid full probe.
- Paul Gortmaker : PCI probe changes, more PCI cards supported.
- rjohnson@analogic.com : Changed init order so an interrupt will only
- occur after memory is allocated for dev->priv. Deallocated memory
- last in cleanup_modue()
- Richard Guenther : Added support for ISAPnP cards
- Paul Gortmaker : Discontinued PCI support - use ne2k-pci.c instead.
-
-*/
-
-/* Routines for the NatSemi-based designs (NE[12]000). */
-
-static const char version1[] =
-"ne.c:v1.10 9/23/94 Donald Becker (becker@scyld.com)\n";
-static const char version2[] =
-"Last modified Nov 1, 2000 by Paul Gortmaker\n";
-
-
-#include <xeno/module.h>
-#include <xeno/kernel.h>
-#include <xeno/sched.h>
-#include <xeno/errno.h>
-#include <xeno/init.h>
-#include <xeno/delay.h>
-#include <asm/system.h>
-#include <asm/io.h>
-
-#include <xeno/netdevice.h>
-#include <xeno/etherdevice.h>
-#include "8390.h"
-
-/* Some defines that people can play with if so inclined. */
-
-/* Do we support clones that don't adhere to 14,15 of the SAprom ? */
-#define SUPPORT_NE_BAD_CLONES
-
-/* Do we perform extra sanity checks on stuff ? */
-/* #define NE_SANITY_CHECK */
-
-/* Do we implement the read before write bugfix ? */
-/* #define NE_RW_BUGFIX */
-
-/* Do we have a non std. amount of memory? (in units of 256 byte pages) */
-/* #define PACKETBUF_MEMSIZE 0x40 */
-
-#ifdef SUPPORT_NE_BAD_CLONES
-/* A list of bad clones that we none-the-less recognize. */
-static struct { const char *name8, *name16; unsigned char SAprefix[4];}
-bad_clone_list[] __initdata = {
- {"DE100", "DE200", {0x00, 0xDE, 0x01,}},
- {"DE120", "DE220", {0x00, 0x80, 0xc8,}},
- {"DFI1000", "DFI2000", {'D', 'F', 'I',}}, /* Original, eh? */
- {"EtherNext UTP8", "EtherNext UTP16", {0x00, 0x00, 0x79}},
- {"NE1000","NE2000-invalid", {0x00, 0x00, 0xd8}}, /* Ancient real NE1000. */
- {"NN1000", "NN2000", {0x08, 0x03, 0x08}}, /* Outlaw no-name clone. */
- {"4-DIM8","4-DIM16", {0x00,0x00,0x4d,}}, /* Outlaw 4-Dimension cards. */
- {"Con-Intl_8", "Con-Intl_16", {0x00, 0x00, 0x24}}, /* Connect Int'nl */
- {"ET-100","ET-200", {0x00, 0x45, 0x54}}, /* YANG and YA clone */
- {"COMPEX","COMPEX16",{0x00,0x80,0x48}}, /* Broken ISA Compex cards */
- {"E-LAN100", "E-LAN200", {0x00, 0x00, 0x5d}}, /* Broken ne1000 clones */
- {"PCM-4823", "PCM-4823", {0x00, 0xc0, 0x6c}}, /* Broken Advantech MoBo */
- {"REALTEK", "RTL8019", {0x00, 0x00, 0xe8}}, /* no-name with Realtek chip */
- {"LCS-8834", "LCS-8836", {0x04, 0x04, 0x37}}, /* ShinyNet (SET) */
- {0,}
-};
-#endif
-
-/* ---- No user-serviceable parts below ---- */
-
-#define NE_BASE (dev->base_addr)
-#define NE_CMD 0x00
-#define NE_DATAPORT 0x10 /* NatSemi-defined port window offset. */
-#define NE_RESET 0x1f /* Issue a read to reset, a write to clear. */
-#define NE_IO_EXTENT 0x20
-
-#define NE1SM_START_PG 0x20 /* First page of TX buffer */
-#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */
-#define NESM_START_PG 0x40 /* First page of TX buffer */
-#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
-
-int ne_probe(struct net_device *dev);
-static int ne_probe1(struct net_device *dev, int ioaddr);
-
-static int ne_open(struct net_device *dev);
-static int ne_close(struct net_device *dev);
-
-static void ne_reset_8390(struct net_device *dev);
-static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
- int ring_page);
-static void ne_block_input(struct net_device *dev, int count,
- struct sk_buff *skb, int ring_offset);
-static void ne_block_output(struct net_device *dev, const int count,
- const unsigned char *buf, const int start_page);
-
-\f
-/* Probe for various non-shared-memory ethercards.
-
- NEx000-clone boards have a Station Address PROM (SAPROM) in the packet
- buffer memory space. NE2000 clones have 0x57,0x57 in bytes 0x0e,0x0f of
- the SAPROM, while other supposed NE2000 clones must be detected by their
- SA prefix.
-
- Reading the SAPROM from a word-wide card with the 8390 set in byte-wide
- mode results in doubled values, which can be detected and compensated for.
-
- The probe is also responsible for initializing the card and filling
- in the 'dev' and 'ei_status' structures.
-
- We use the minimum memory size for some ethercard product lines, iff we can't
- distinguish models. You can increase the packet buffer size by setting
- PACKETBUF_MEMSIZE. Reported Cabletron packet buffer locations are:
- E1010 starts at 0x100 and ends at 0x2000.
- E1010-x starts at 0x100 and ends at 0x8000. ("-x" means "more memory")
- E2010 starts at 0x100 and ends at 0x4000.
- E2010-x starts at 0x100 and ends at 0xffff. */
-
-int __init ne_probe(struct net_device *dev)
-{
- unsigned int base_addr = dev->base_addr;
-
- SET_MODULE_OWNER(dev);
-
- /* First check any supplied i/o locations. User knows best. <cough> */
- if (base_addr > 0x1ff) /* Check a single specified location. */
- return ne_probe1(dev, base_addr);
-
- return -ENODEV;
-}
-
-static int __init ne_probe1(struct net_device *dev, int ioaddr)
-{
- int i;
- unsigned char SA_prom[32];
- int wordlength = 2;
- const char *name = NULL;
- int start_page, stop_page;
- int neX000, ctron, copam, bad_card;
- int reg0, ret;
- static unsigned version_printed;
-
- if (!request_region(ioaddr, NE_IO_EXTENT, dev->name))
- return -EBUSY;
-
- reg0 = inb_p(ioaddr);
- if (reg0 == 0xFF) {
- ret = -ENODEV;
- goto err_out;
- }
-
- /* Do a preliminary verification that we have a 8390. */
- {
- int regd;
- outb_p(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD);
- regd = inb_p(ioaddr + 0x0d);
- outb_p(0xff, ioaddr + 0x0d);
- outb_p(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD);
- inb_p(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */
- if (inb_p(ioaddr + EN0_COUNTER0) != 0) {
- outb_p(reg0, ioaddr);
- outb_p(regd, ioaddr + 0x0d); /* Restore the old values. */
- ret = -ENODEV;
- goto err_out;
- }
- }
-
- if (ei_debug && version_printed++ == 0)
- printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
-
- printk(KERN_INFO "NE*000 ethercard probe at %#3x:", ioaddr);
-
- /* A user with a poor card that fails to ack the reset, or that
- does not have a valid 0x57,0x57 signature can still use this
- without having to recompile. Specifying an i/o address along
- with an otherwise unused dev->mem_end value of "0xBAD" will
- cause the driver to skip these parts of the probe. */
-
- bad_card = ((dev->base_addr != 0) && (dev->mem_end == 0xbad));
-
- /* Reset card. Who knows what dain-bramaged state it was left in. */
-
- {
- unsigned long reset_start_time = jiffies;
-
- /* DON'T change these to inb_p/outb_p or reset will fail on clones. */
- outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET);
-
- while ((inb_p(ioaddr + EN0_ISR) & ENISR_RESET) == 0)
- if (jiffies - reset_start_time > 2*HZ/100) {
- if (bad_card) {
- printk(" (warning: no reset ack)");
- break;
- } else {
- printk(" not found (no reset ack).\n");
- ret = -ENODEV;
- goto err_out;
- }
- }
-
- outb_p(0xff, ioaddr + EN0_ISR); /* Ack all intr. */
- }
-
- /* Read the 16 bytes of station address PROM.
- We must first initialize registers, similar to NS8390_init(eifdev, 0).
- We can't reliably read the SAPROM address without this.
- (I learned the hard way!). */
- {
- struct {unsigned char value, offset; } program_seq[] =
- {
- {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/
- {0x48, EN0_DCFG}, /* Set byte-wide (0x48) access. */
- {0x00, EN0_RCNTLO}, /* Clear the count regs. */
- {0x00, EN0_RCNTHI},
- {0x00, EN0_IMR}, /* Mask completion irq. */
- {0xFF, EN0_ISR},
- {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */
- {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */
- {32, EN0_RCNTLO},
- {0x00, EN0_RCNTHI},
- {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */
- {0x00, EN0_RSARHI},
- {E8390_RREAD+E8390_START, E8390_CMD},
- };
-
- for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++)
- outb_p(program_seq[i].value, ioaddr + program_seq[i].offset);
-
- }
- for(i = 0; i < 32 /*sizeof(SA_prom)*/; i+=2) {
- SA_prom[i] = inb(ioaddr + NE_DATAPORT);
- SA_prom[i+1] = inb(ioaddr + NE_DATAPORT);
- if (SA_prom[i] != SA_prom[i+1])
- wordlength = 1;
- }
-
- if (wordlength == 2)
- {
- for (i = 0; i < 16; i++)
- SA_prom[i] = SA_prom[i+i];
- /* We must set the 8390 for word mode. */
- outb_p(0x49, ioaddr + EN0_DCFG);
- start_page = NESM_START_PG;
- stop_page = NESM_STOP_PG;
- } else {
- start_page = NE1SM_START_PG;
- stop_page = NE1SM_STOP_PG;
- }
-
- neX000 = (SA_prom[14] == 0x57 && SA_prom[15] == 0x57);
- ctron = (SA_prom[0] == 0x00 && SA_prom[1] == 0x00 && SA_prom[2] == 0x1d);
- copam = (SA_prom[14] == 0x49 && SA_prom[15] == 0x00);
-
- /* Set up the rest of the parameters. */
- if (neX000 || bad_card || copam) {
- name = (wordlength == 2) ? "NE2000" : "NE1000";
- }
- else if (ctron)
- {
- name = (wordlength == 2) ? "Ctron-8" : "Ctron-16";
- start_page = 0x01;
- stop_page = (wordlength == 2) ? 0x40 : 0x20;
- }
- else
- {
-#ifdef SUPPORT_NE_BAD_CLONES
- /* Ack! Well, there might be a *bad* NE*000 clone there.
- Check for total bogus addresses. */
- for (i = 0; bad_clone_list[i].name8; i++)
- {
- if (SA_prom[0] == bad_clone_list[i].SAprefix[0] &&
- SA_prom[1] == bad_clone_list[i].SAprefix[1] &&
- SA_prom[2] == bad_clone_list[i].SAprefix[2])
- {
- if (wordlength == 2)
- {
- name = bad_clone_list[i].name16;
- } else {
- name = bad_clone_list[i].name8;
- }
- break;
- }
- }
- if (bad_clone_list[i].name8 == NULL)
- {
- printk(" not found (invalid signature %2.2x %2.2x).\n",
- SA_prom[14], SA_prom[15]);
- ret = -ENXIO;
- goto err_out;
- }
-#else
- printk(" not found.\n");
- ret = -ENXIO;
- goto err_out;
-#endif
- }
-
- if (dev->irq < 2)
- {
- unsigned long cookie = probe_irq_on();
- outb_p(0x50, ioaddr + EN0_IMR); /* Enable one interrupt. */
- outb_p(0x00, ioaddr + EN0_RCNTLO);
- outb_p(0x00, ioaddr + EN0_RCNTHI);
- outb_p(E8390_RREAD+E8390_START, ioaddr); /* Trigger it... */
- mdelay(10); /* wait 10ms for interrupt to propagate */
- outb_p(0x00, ioaddr + EN0_IMR); /* Mask it again. */
- dev->irq = probe_irq_off(cookie);
- if (ei_debug > 2)
- printk(" autoirq is %d\n", dev->irq);
- } else if (dev->irq == 2)
- /* Fixup for users that don't know that IRQ 2 is really IRQ 9,
- or don't know which one to set. */
- dev->irq = 9;
-
- if (! dev->irq) {
- printk(" failed to detect IRQ line.\n");
- ret = -EAGAIN;
- goto err_out;
- }
-
- /* Allocate dev->priv and fill in 8390 specific dev fields. */
- if (ethdev_init(dev))
- {
- printk (" unable to get memory for dev->priv.\n");
- ret = -ENOMEM;
- goto err_out;
- }
-
- /* Snarf the interrupt now. There's no point in waiting since we cannot
- share and the board will usually be enabled. */
- ret = request_irq(dev->irq, ei_interrupt, 0, name, dev);
- if (ret) {
- printk (" unable to get IRQ %d (errno=%d).\n", dev->irq, ret);
- goto err_out_kfree;
- }
-
- dev->base_addr = ioaddr;
-
- for(i = 0; i < ETHER_ADDR_LEN; i++) {
- printk(" %2.2x", SA_prom[i]);
- dev->dev_addr[i] = SA_prom[i];
- }
-
- printk("\n%s: %s found at %#x, using IRQ %d.\n",
- dev->name, name, ioaddr, dev->irq);
-
- ei_status.name = name;
- ei_status.tx_start_page = start_page;
- ei_status.stop_page = stop_page;
- ei_status.word16 = (wordlength == 2);
-
- ei_status.rx_start_page = start_page + TX_PAGES;
-#ifdef PACKETBUF_MEMSIZE
- /* Allow the packet buffer size to be overridden by know-it-alls. */
- ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE;
-#endif
-
- ei_status.reset_8390 = &ne_reset_8390;
- ei_status.block_input = &ne_block_input;
- ei_status.block_output = &ne_block_output;
- ei_status.get_8390_hdr = &ne_get_8390_hdr;
- ei_status.priv = 0;
- dev->open = &ne_open;
- dev->stop = &ne_close;
- NS8390_init(dev, 0);
- return 0;
-
-err_out_kfree:
- kfree(dev->priv);
- dev->priv = NULL;
-err_out:
- release_region(ioaddr, NE_IO_EXTENT);
- return ret;
-}
-
-static int ne_open(struct net_device *dev)
-{
- ei_open(dev);
- return 0;
-}
-
-static int ne_close(struct net_device *dev)
-{
- if (ei_debug > 1)
- printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
- ei_close(dev);
- return 0;
-}
-
-/* Hard reset the card. This used to pause for the same period that a
- 8390 reset command required, but that shouldn't be necessary. */
-
-static void ne_reset_8390(struct net_device *dev)
-{
- unsigned long reset_start_time = jiffies;
-
- if (ei_debug > 1)
- printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies);
-
- /* DON'T change these to inb_p/outb_p or reset will fail on clones. */
- outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
-
- ei_status.txing = 0;
- ei_status.dmaing = 0;
-
- /* This check _should_not_ be necessary, omit eventually. */
- while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
- if (jiffies - reset_start_time > 2*HZ/100) {
- printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name);
- break;
- }
- outb_p(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */
-}
-
-/* Grab the 8390 specific header. Similar to the block_input routine, but
- we don't need to be concerned with ring wrap as the header will be at
- the start of a page, so we optimize accordingly. */
-
-static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
-{
- int nic_base = dev->base_addr;
-
- /* This *shouldn't* happen. If it does, it's the last thing you'll see */
-
- if (ei_status.dmaing)
- {
- printk(KERN_EMERG "%s: DMAing conflict in ne_get_8390_hdr "
- "[DMAstat:%d][irqlock:%d].\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
- return;
- }
-
- ei_status.dmaing |= 0x01;
- outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
- outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
- outb_p(0, nic_base + EN0_RCNTHI);
- outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */
- outb_p(ring_page, nic_base + EN0_RSARHI);
- outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
-
- if (ei_status.word16)
- insw(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
- else
- insb(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr));
-
- outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
- ei_status.dmaing &= ~0x01;
-
- le16_to_cpus(&hdr->count);
-}
-
-/* Block input and output, similar to the Crynwr packet driver. If you
- are porting to a new ethercard, look at the packet driver source for hints.
- The NEx000 doesn't share the on-board packet memory -- you have to put
- the packet out through the "remote DMA" dataport using outb. */
-
-static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
-{
-#ifdef NE_SANITY_CHECK
- int xfer_count = count;
-#endif
- int nic_base = dev->base_addr;
- char *buf = skb->data;
-
- /* This *shouldn't* happen. If it does, it's the last thing you'll see */
- if (ei_status.dmaing)
- {
- printk(KERN_EMERG "%s: DMAing conflict in ne_block_input "
- "[DMAstat:%d][irqlock:%d].\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
- return;
- }
- ei_status.dmaing |= 0x01;
- outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
- outb_p(count & 0xff, nic_base + EN0_RCNTLO);
- outb_p(count >> 8, nic_base + EN0_RCNTHI);
- outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
- outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
- outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
- if (ei_status.word16)
- {
- insw(NE_BASE + NE_DATAPORT,buf,count>>1);
- if (count & 0x01)
- {
- buf[count-1] = inb(NE_BASE + NE_DATAPORT);
-#ifdef NE_SANITY_CHECK
- xfer_count++;
-#endif
- }
- } else {
- insb(NE_BASE + NE_DATAPORT, buf, count);
- }
-
-#ifdef NE_SANITY_CHECK
- /* This was for the ALPHA version only, but enough people have
- been encountering problems so it is still here. If you see
- this message you either 1) have a slightly incompatible clone
- or 2) have noise/speed problems with your bus. */
-
- if (ei_debug > 1)
- {
- /* DMA termination address check... */
- int addr, tries = 20;
- do {
- /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here
- -- it's broken for Rx on some cards! */
- int high = inb_p(nic_base + EN0_RSARHI);
- int low = inb_p(nic_base + EN0_RSARLO);
- addr = (high << 8) + low;
- if (((ring_offset + xfer_count) & 0xff) == low)
- break;
- } while (--tries > 0);
- if (tries <= 0)
- printk(KERN_WARNING "%s: RX transfer address mismatch,"
- "%#4.4x (expected) vs. %#4.4x (actual).\n",
- dev->name, ring_offset + xfer_count, addr);
- }
-#endif
- outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
- ei_status.dmaing &= ~0x01;
-}
-
-static void ne_block_output(struct net_device *dev, int count,
- const unsigned char *buf, const int start_page)
-{
- int nic_base = NE_BASE;
- unsigned long dma_start;
-#ifdef NE_SANITY_CHECK
- int retries = 0;
-#endif
-
- /* Round the count up for word writes. Do we need to do this?
- What effect will an odd byte count have on the 8390?
- I should check someday. */
-
- if (ei_status.word16 && (count & 0x01))
- count++;
-
- /* This *shouldn't* happen. If it does, it's the last thing you'll see */
- if (ei_status.dmaing)
- {
- printk(KERN_EMERG "%s: DMAing conflict in ne_block_output."
- "[DMAstat:%d][irqlock:%d]\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
- return;
- }
- ei_status.dmaing |= 0x01;
- /* We should already be in page 0, but to be safe... */
- outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);
-
-#ifdef NE_SANITY_CHECK
-retry:
-#endif
-
-#ifdef NE8390_RW_BUGFIX
- /* Handle the read-before-write bug the same way as the
- Crynwr packet driver -- the NatSemi method doesn't work.
- Actually this doesn't always work either, but if you have
- problems with your NEx000 this is better than nothing! */
-
- outb_p(0x42, nic_base + EN0_RCNTLO);
- outb_p(0x00, nic_base + EN0_RCNTHI);
- outb_p(0x42, nic_base + EN0_RSARLO);
- outb_p(0x00, nic_base + EN0_RSARHI);
- outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
- /* Make certain that the dummy read has occurred. */
- udelay(6);
-#endif
-
- outb_p(ENISR_RDC, nic_base + EN0_ISR);
-
- /* Now the normal output. */
- outb_p(count & 0xff, nic_base + EN0_RCNTLO);
- outb_p(count >> 8, nic_base + EN0_RCNTHI);
- outb_p(0x00, nic_base + EN0_RSARLO);
- outb_p(start_page, nic_base + EN0_RSARHI);
-
- outb_p(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
- if (ei_status.word16) {
- outsw(NE_BASE + NE_DATAPORT, buf, count>>1);
- } else {
- outsb(NE_BASE + NE_DATAPORT, buf, count);
- }
-
- dma_start = jiffies;
-
-#ifdef NE_SANITY_CHECK
- /* This was for the ALPHA version only, but enough people have
- been encountering problems so it is still here. */
-
- if (ei_debug > 1)
- {
- /* DMA termination address check... */
- int addr, tries = 20;
- do {
- int high = inb_p(nic_base + EN0_RSARHI);
- int low = inb_p(nic_base + EN0_RSARLO);
- addr = (high << 8) + low;
- if ((start_page << 8) + count == addr)
- break;
- } while (--tries > 0);
-
- if (tries <= 0)
- {
- printk(KERN_WARNING "%s: Tx packet transfer address mismatch,"
- "%#4.4x (expected) vs. %#4.4x (actual).\n",
- dev->name, (start_page << 8) + count, addr);
- if (retries++ == 0)
- goto retry;
- }
- }
-#endif
-
- while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0)
- if (jiffies - dma_start > 2*HZ/100) { /* 20ms */
- printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name);
- ne_reset_8390(dev);
- NS8390_init(dev,1);
- break;
- }
-
- outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
- ei_status.dmaing &= ~0x01;
- return;
-}
-
-static struct net_device dev_ne;
-
-static int __init init_module(void)
-{
- struct net_device *dev = &dev_ne;
- extern unsigned int opt_ne_base;
-
- if ( opt_ne_base == 0 ) return 0;
-
- dev->irq = 0;
- dev->mem_end = 0;
- dev->base_addr = opt_ne_base;
- dev->init = ne_probe;
-
- if ( register_netdev(dev) != 0 )
- {
- printk(KERN_WARNING "ne.c: No card found at io %#x\n", opt_ne_base);
- }
-
- return 0;
-}
-
-static void __exit cleanup_module(void)
-{
- struct net_device *dev = &dev_ne;
- if ( dev->priv != NULL )
- {
- void *priv = dev->priv;
- free_irq(dev->irq, dev);
- release_region(dev->base_addr, NE_IO_EXTENT);
- unregister_netdev(dev);
- kfree(priv);
- }
-}
-
-module_init(init_module);
-module_exit(cleanup_module);
EXPORT_SYMBOL(register_netdev);
EXPORT_SYMBOL(unregister_netdev);
+
+void alert_slow_netdevice(struct net_device *dev, char *nictype)
+{
+ printk("***************************\n");
+ printk("* WARNING FOR NET DEVICE %s (NIC type '%s'):\n",
+ dev->name, nictype);
+ printk("* This NIC cannot support fully efficient networking in Xen.\n");
+ printk("* In particular, extra packet copies will be incurred!\n");
+ printk("* See documentation for a list of recommended NIC types\n");
+ printk("***************************\n");
+}
+
+
#ifdef CONFIG_TR
void tr_setup(struct net_device *dev)
--- /dev/null
+/* pcnet32.c: An AMD PCnet32 ethernet driver for linux. */
+/*
+ * Copyright 1996-1999 Thomas Bogendoerfer
+ *
+ * Derived from the lance driver written 1993,1994,1995 by Donald Becker.
+ *
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency.
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ * This driver is for PCnet32 and PCnetPCI based ethercards
+ */
+/**************************************************************************
+ * 23 Oct, 2000.
+ * Fixed a few bugs, related to running the controller in 32bit mode.
+ *
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ *
+ *************************************************************************/
+
+#define DRV_NAME "pcnet32"
+#define DRV_VERSION "1.27a"
+#define DRV_RELDATE "10.02.2002"
+#define PFX DRV_NAME ": "
+
+static const char *version =
+DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/uaccess.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+
+#undef TX_RING_SIZE
+#undef RX_RING_SIZE
+
+/*
+ * PCI device identifiers for "new style" Linux PCI Device Drivers
+ */
+static struct pci_device_id pcnet32_pci_tbl[] __devinitdata = {
+ { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE (pci, pcnet32_pci_tbl);
+
+int cards_found __initdata;
+
+/*
+ * VLB I/O addresses
+ */
+static unsigned int pcnet32_portlist[] __initdata =
+ { 0x300, 0x320, 0x340, 0x360, 0 };
+
+
+
+static int pcnet32_debug = 1;
+static int tx_start = 1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */
+static int pcnet32vlb; /* check for VLB cards ? */
+
+static struct net_device *pcnet32_dev;
+
+static int max_interrupt_work = 80;
+static int rx_copybreak = 200;
+
+#define PCNET32_PORT_AUI 0x00
+#define PCNET32_PORT_10BT 0x01
+#define PCNET32_PORT_GPSI 0x02
+#define PCNET32_PORT_MII 0x03
+
+#define PCNET32_PORT_PORTSEL 0x03
+#define PCNET32_PORT_ASEL 0x04
+#define PCNET32_PORT_100 0x40
+#define PCNET32_PORT_FD 0x80
+
+#define PCNET32_DMA_MASK 0xffffffff
+
+/*
+ * table to translate option values from tulip
+ * to internal options
+ */
+static unsigned char options_mapping[] = {
+ PCNET32_PORT_ASEL, /* 0 Auto-select */
+ PCNET32_PORT_AUI, /* 1 BNC/AUI */
+ PCNET32_PORT_AUI, /* 2 AUI/BNC */
+ PCNET32_PORT_ASEL, /* 3 not supported */
+ PCNET32_PORT_10BT | PCNET32_PORT_FD, /* 4 10baseT-FD */
+ PCNET32_PORT_ASEL, /* 5 not supported */
+ PCNET32_PORT_ASEL, /* 6 not supported */
+ PCNET32_PORT_ASEL, /* 7 not supported */
+ PCNET32_PORT_ASEL, /* 8 not supported */
+ PCNET32_PORT_MII, /* 9 MII 10baseT */
+ PCNET32_PORT_MII | PCNET32_PORT_FD, /* 10 MII 10baseT-FD */
+ PCNET32_PORT_MII, /* 11 MII (autosel) */
+ PCNET32_PORT_10BT, /* 12 10BaseT */
+ PCNET32_PORT_MII | PCNET32_PORT_100, /* 13 MII 100BaseTx */
+ PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD, /* 14 MII 100BaseTx-FD */
+ PCNET32_PORT_ASEL /* 15 not supported */
+};
+
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS];
+static int full_duplex[MAX_UNITS];
+
+/*
+ * Theory of Operation
+ *
+ * This driver uses the same software structure as the normal lance
+ * driver. So look for a verbose description in lance.c. The differences
+ * to the normal lance driver is the use of the 32bit mode of PCnet32
+ * and PCnetPCI chips. Because these chips are 32bit chips, there is no
+ * 16MB limitation and we don't need bounce buffers.
+ */
+
+/*
+ * History:
+ * v0.01: Initial version
+ * only tested on Alpha Noname Board
+ * v0.02: changed IRQ handling for new interrupt scheme (dev_id)
+ * tested on a ASUS SP3G
+ * v0.10: fixed an odd problem with the 79C974 in a Compaq Deskpro XL
+ * looks like the 974 doesn't like stopping and restarting in a
+ * short period of time; now we do a reinit of the lance; the
+ * bug was triggered by doing ifconfig eth0 <ip> broadcast <addr>
+ * and hangs the machine (thanks to Klaus Liedl for debugging)
+ * v0.12: by suggestion from Donald Becker: Renamed driver to pcnet32,
+ * made it standalone (no need for lance.c)
+ * v0.13: added additional PCI detecting for special PCI devices (Compaq)
+ * v0.14: stripped down additional PCI probe (thanks to David C Niemi
+ * and sveneric@xs4all.nl for testing this on their Compaq boxes)
+ * v0.15: added 79C965 (VLB) probe
+ * added interrupt sharing for PCI chips
+ * v0.16: fixed set_multicast_list on Alpha machines
+ * v0.17: removed hack from dev.c; now pcnet32 uses ethif_probe in Space.c
+ * v0.19: changed setting of autoselect bit
+ * v0.20: removed additional Compaq PCI probe; there is now a working one
+ * in arch/i386/bios32.c
+ * v0.21: added endian conversion for ppc, from work by cort@cs.nmt.edu
+ * v0.22: added printing of status to ring dump
+ * v0.23: changed enet_statistics to net_device_stats
+ * v0.90: added multicast filter
+ * added module support
+ * changed irq probe to new style
+ * added PCnetFast chip id
+ * added fix for receive stalls with Intel saturn chipsets
+ * added in-place rx skbs like in the tulip driver
+ * minor cleanups
+ * v0.91: added PCnetFast+ chip id
+ * back port to 2.0.x
+ * v1.00: added some stuff from Donald Becker's 2.0.34 version
+ * added support for byte counters in net_dev_stats
+ * v1.01: do ring dumps, only when debugging the driver
+ * increased the transmit timeout
+ * v1.02: fixed memory leak in pcnet32_init_ring()
+ * v1.10: workaround for stopped transmitter
+ * added port selection for modules
+ * detect special T1/E1 WAN card and setup port selection
+ * v1.11: fixed wrong checking of Tx errors
+ * v1.20: added check of return value kmalloc (cpeterso@cs.washington.edu)
+ * added save original kmalloc addr for freeing (mcr@solidum.com)
+ * added support for PCnetHome chip (joe@MIT.EDU)
+ * rewritten PCI card detection
+ * added dwio mode to get driver working on some PPC machines
+ * v1.21: added mii selection and mii ioctl
+ * v1.22: changed pci scanning code to make PPC people happy
+ * fixed switching to 32bit mode in pcnet32_open() (thanks
+ * to Michael Richard <mcr@solidum.com> for noticing this one)
+ * added sub vendor/device id matching (thanks again to
+ * Michael Richard <mcr@solidum.com>)
+ * added chip id for 79c973/975 (thanks to Zach Brown <zab@zabbo.net>)
+ * v1.23 fixed small bug, when manual selecting MII speed/duplex
+ * v1.24 Applied Thomas' patch to use TxStartPoint and thus decrease TxFIFO
+ * underflows. Added tx_start_pt module parameter. Increased
+ * TX_RING_SIZE from 16 to 32. Added #ifdef'd code to use DXSUFLO
+ * for FAST[+] chipsets. <kaf@fc.hp.com>
+ * v1.24ac Added SMP spinlocking - Alan Cox <alan@redhat.com>
+ * v1.25kf Added No Interrupt on successful Tx for some Tx's <kaf@fc.hp.com>
+ * v1.26 Converted to pci_alloc_consistent, Jamey Hicks / George France
+ * <jamey@crl.dec.com>
+ * - Fixed a few bugs, related to running the controller in 32bit mode.
+ * 23 Oct, 2000. Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ * v1.26p Fix oops on rmmod+insmod; plug i/o resource leak - Paul Gortmaker
+ * v1.27 improved CSR/PROM address detection, lots of cleanups,
+ * new pcnet32vlb module option, HP-PARISC support,
+ * added module parameter descriptions,
+ * initial ethtool support - Helge Deller <deller@gmx.de>
+ * v1.27a Sun Feb 10 2002 Go Taniguchi <go@turbolinux.co.jp>
+ * use alloc_etherdev and register_netdev
+ * fix pci probe not increment cards_found
+ * FD auto negotiate error workaround for xSeries250
+ * clean up and using new mii module
+ */
+
+
+/*
+ * Set the number of Tx and Rx buffers, using Log_2(# buffers).
+ * Reasonable default values are 16 Tx buffers, and 32 Rx buffers.
+ * That translates to 4 (16 == 2^4) and 5 (32 == 2^5).
+ */
+#ifndef PCNET32_LOG_TX_BUFFERS
+#define PCNET32_LOG_TX_BUFFERS 4
+#define PCNET32_LOG_RX_BUFFERS 5
+#endif
+
+#define TX_RING_SIZE (1 << (PCNET32_LOG_TX_BUFFERS))
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+#define TX_RING_LEN_BITS ((PCNET32_LOG_TX_BUFFERS) << 12)
+
+#define RX_RING_SIZE (1 << (PCNET32_LOG_RX_BUFFERS))
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+#define RX_RING_LEN_BITS ((PCNET32_LOG_RX_BUFFERS) << 4)
+
+#define PKT_BUF_SZ 1544
+
+/* Offsets from base I/O address. */
+#define PCNET32_WIO_RDP 0x10
+#define PCNET32_WIO_RAP 0x12
+#define PCNET32_WIO_RESET 0x14
+#define PCNET32_WIO_BDP 0x16
+
+#define PCNET32_DWIO_RDP 0x10
+#define PCNET32_DWIO_RAP 0x14
+#define PCNET32_DWIO_RESET 0x18
+#define PCNET32_DWIO_BDP 0x1C
+
+#define PCNET32_TOTAL_SIZE 0x20
+
+/* The PCNET32 Rx and Tx ring descriptors. */
+struct pcnet32_rx_head {
+ u32 base;
+ s16 buf_length;
+ s16 status;
+ u32 msg_length;
+ u32 reserved;
+};
+
+struct pcnet32_tx_head {
+ u32 base;
+ s16 length;
+ s16 status;
+ u32 misc;
+ u32 reserved;
+};
+
+/* The PCNET32 32-Bit initialization block, described in databook. */
+struct pcnet32_init_block {
+ u16 mode;
+ u16 tlen_rlen;
+ u8 phys_addr[6];
+ u16 reserved;
+ u32 filter[2];
+ /* Receive and transmit ring base, along with extra bits. */
+ u32 rx_ring;
+ u32 tx_ring;
+};
+
+/* PCnet32 access functions */
+struct pcnet32_access {
+ u16 (*read_csr)(unsigned long, int);
+ void (*write_csr)(unsigned long, int, u16);
+ u16 (*read_bcr)(unsigned long, int);
+ void (*write_bcr)(unsigned long, int, u16);
+ u16 (*read_rap)(unsigned long);
+ void (*write_rap)(unsigned long, u16);
+ void (*reset)(unsigned long);
+};
+
+/*
+ * The first three fields of pcnet32_private are read by the ethernet device
+ * so the structure should be allocated using pci_alloc_consistent().
+ */
+struct pcnet32_private {
+ /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */
+ struct pcnet32_rx_head rx_ring[RX_RING_SIZE];
+ struct pcnet32_tx_head tx_ring[TX_RING_SIZE];
+ struct pcnet32_init_block init_block;
+ dma_addr_t dma_addr; /* DMA address of beginning of this object,
+ returned by pci_alloc_consistent */
+ struct pci_dev *pci_dev; /* Pointer to the associated pci device structure */
+ const char *name;
+ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+ struct sk_buff *tx_skbuff[TX_RING_SIZE];
+ struct sk_buff *rx_skbuff[RX_RING_SIZE];
+ dma_addr_t tx_dma_addr[TX_RING_SIZE];
+ dma_addr_t rx_dma_addr[RX_RING_SIZE];
+ struct pcnet32_access a;
+ spinlock_t lock; /* Guard lock */
+ unsigned int cur_rx, cur_tx; /* The next free ring entry */
+ unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
+ struct net_device_stats stats;
+ char tx_full;
+ int options;
+ int shared_irq:1, /* shared irq possible */
+ ltint:1, /* enable TxDone-intr inhibitor */
+ dxsuflo:1, /* disable transmit stop on uflo */
+ mii:1; /* mii port available */
+ struct net_device *next;
+ struct mii_if_info mii_if;
+};
+
+static void pcnet32_probe_vlbus(void);
+static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
+static int pcnet32_probe1(unsigned long, unsigned int, int, struct pci_dev *);
+static int pcnet32_open(struct net_device *);
+static int pcnet32_init_ring(struct net_device *);
+static int pcnet32_start_xmit(struct sk_buff *, struct net_device *);
+static int pcnet32_rx(struct net_device *);
+static void pcnet32_tx_timeout (struct net_device *dev);
+static void pcnet32_interrupt(int, void *, struct pt_regs *);
+static int pcnet32_close(struct net_device *);
+static struct net_device_stats *pcnet32_get_stats(struct net_device *);
+static void pcnet32_set_multicast_list(struct net_device *);
+#if 0
+static int pcnet32_ioctl(struct net_device *, struct ifreq *, int);
+#endif
+static int mdio_read(struct net_device *dev, int phy_id, int reg_num);
+static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val);
+
+enum pci_flags_bit {
+ PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
+ PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
+};
+
+
+static u16 pcnet32_wio_read_csr (unsigned long addr, int index)
+{
+ outw (index, addr+PCNET32_WIO_RAP);
+ return inw (addr+PCNET32_WIO_RDP);
+}
+
+static void pcnet32_wio_write_csr (unsigned long addr, int index, u16 val)
+{
+ outw (index, addr+PCNET32_WIO_RAP);
+ outw (val, addr+PCNET32_WIO_RDP);
+}
+
+static u16 pcnet32_wio_read_bcr (unsigned long addr, int index)
+{
+ outw (index, addr+PCNET32_WIO_RAP);
+ return inw (addr+PCNET32_WIO_BDP);
+}
+
+static void pcnet32_wio_write_bcr (unsigned long addr, int index, u16 val)
+{
+ outw (index, addr+PCNET32_WIO_RAP);
+ outw (val, addr+PCNET32_WIO_BDP);
+}
+
+static u16 pcnet32_wio_read_rap (unsigned long addr)
+{
+ return inw (addr+PCNET32_WIO_RAP);
+}
+
+static void pcnet32_wio_write_rap (unsigned long addr, u16 val)
+{
+ outw (val, addr+PCNET32_WIO_RAP);
+}
+
+static void pcnet32_wio_reset (unsigned long addr)
+{
+ inw (addr+PCNET32_WIO_RESET);
+}
+
+static int pcnet32_wio_check (unsigned long addr)
+{
+ outw (88, addr+PCNET32_WIO_RAP);
+ return (inw (addr+PCNET32_WIO_RAP) == 88);
+}
+
+static struct pcnet32_access pcnet32_wio = {
+ read_csr: pcnet32_wio_read_csr,
+ write_csr: pcnet32_wio_write_csr,
+ read_bcr: pcnet32_wio_read_bcr,
+ write_bcr: pcnet32_wio_write_bcr,
+ read_rap: pcnet32_wio_read_rap,
+ write_rap: pcnet32_wio_write_rap,
+ reset: pcnet32_wio_reset
+};
+
+static u16 pcnet32_dwio_read_csr (unsigned long addr, int index)
+{
+ outl (index, addr+PCNET32_DWIO_RAP);
+ return (inl (addr+PCNET32_DWIO_RDP) & 0xffff);
+}
+
+static void pcnet32_dwio_write_csr (unsigned long addr, int index, u16 val)
+{
+ outl (index, addr+PCNET32_DWIO_RAP);
+ outl (val, addr+PCNET32_DWIO_RDP);
+}
+
+static u16 pcnet32_dwio_read_bcr (unsigned long addr, int index)
+{
+ outl (index, addr+PCNET32_DWIO_RAP);
+ return (inl (addr+PCNET32_DWIO_BDP) & 0xffff);
+}
+
+static void pcnet32_dwio_write_bcr (unsigned long addr, int index, u16 val)
+{
+ outl (index, addr+PCNET32_DWIO_RAP);
+ outl (val, addr+PCNET32_DWIO_BDP);
+}
+
+static u16 pcnet32_dwio_read_rap (unsigned long addr)
+{
+ return (inl (addr+PCNET32_DWIO_RAP) & 0xffff);
+}
+
+static void pcnet32_dwio_write_rap (unsigned long addr, u16 val)
+{
+ outl (val, addr+PCNET32_DWIO_RAP);
+}
+
+static void pcnet32_dwio_reset (unsigned long addr)
+{
+ inl (addr+PCNET32_DWIO_RESET);
+}
+
+static int pcnet32_dwio_check (unsigned long addr)
+{
+ outl (88, addr+PCNET32_DWIO_RAP);
+ return ((inl (addr+PCNET32_DWIO_RAP) & 0xffff) == 88);
+}
+
+static struct pcnet32_access pcnet32_dwio = {
+ read_csr: pcnet32_dwio_read_csr,
+ write_csr: pcnet32_dwio_write_csr,
+ read_bcr: pcnet32_dwio_read_bcr,
+ write_bcr: pcnet32_dwio_write_bcr,
+ read_rap: pcnet32_dwio_read_rap,
+ write_rap: pcnet32_dwio_write_rap,
+ reset: pcnet32_dwio_reset
+};
+
+
+
+/* only probes for non-PCI devices, the rest are handled by
+ * pci_register_driver via pcnet32_probe_pci */
+
+static void __devinit
+pcnet32_probe_vlbus(void)
+{
+ unsigned int *port, ioaddr;
+
+ /* search for PCnet32 VLB cards at known addresses */
+ for (port = pcnet32_portlist; (ioaddr = *port); port++) {
+ if (!check_region(ioaddr, PCNET32_TOTAL_SIZE)) {
+ /* check if there is really a pcnet chip on that ioaddr */
+ if ((inb(ioaddr + 14) == 0x57) && (inb(ioaddr + 15) == 0x57))
+ pcnet32_probe1(ioaddr, 0, 0, NULL);
+ }
+ }
+}
+
+
+static int __devinit
+pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ unsigned long ioaddr;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err < 0) {
+ printk(KERN_ERR PFX "failed to enable device -- err=%d\n", err);
+ return err;
+ }
+ pci_set_master(pdev);
+
+ ioaddr = pci_resource_start (pdev, 0);
+ if (!ioaddr) {
+ printk (KERN_ERR PFX "card has no PCI IO resources, aborting\n");
+ return -ENODEV;
+ }
+
+ if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) {
+ printk(KERN_ERR PFX "architecture does not support 32bit PCI busmaster DMA\n");
+ return -ENODEV;
+ }
+
+ return pcnet32_probe1(ioaddr, pdev->irq, 1, pdev);
+}
+
+
+/* pcnet32_probe1
+ * Called from both pcnet32_probe_vlbus and pcnet32_probe_pci.
+ * pdev will be NULL when called from pcnet32_probe_vlbus.
+ */
+static int __devinit
+pcnet32_probe1(unsigned long ioaddr, unsigned int irq_line, int shared,
+ struct pci_dev *pdev)
+{
+ struct pcnet32_private *lp;
+ dma_addr_t lp_dma_addr;
+ int i, media;
+ int fdx, mii, fset, dxsuflo, ltint;
+ int chip_version;
+ char *chipname;
+ struct net_device *dev;
+ struct pcnet32_access *a = NULL;
+ u8 promaddr[6];
+
+ /* reset the chip */
+ pcnet32_wio_reset(ioaddr);
+
+ /* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */
+ if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) {
+ a = &pcnet32_wio;
+ } else {
+ pcnet32_dwio_reset(ioaddr);
+ if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 && pcnet32_dwio_check(ioaddr)) {
+ a = &pcnet32_dwio;
+ } else
+ return -ENODEV;
+ }
+
+ chip_version = a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr,89) << 16);
+ if (pcnet32_debug > 2)
+ printk(KERN_INFO " PCnet chip version is %#x.\n", chip_version);
+ if ((chip_version & 0xfff) != 0x003)
+ return -ENODEV;
+
+ /* initialize variables */
+ fdx = mii = fset = dxsuflo = ltint = 0;
+ chip_version = (chip_version >> 12) & 0xffff;
+
+ switch (chip_version) {
+ case 0x2420:
+ chipname = "PCnet/PCI 79C970"; /* PCI */
+ break;
+ case 0x2430:
+ if (shared)
+ chipname = "PCnet/PCI 79C970"; /* 970 gives the wrong chip id back */
+ else
+ chipname = "PCnet/32 79C965"; /* 486/VL bus */
+ break;
+ case 0x2621:
+ chipname = "PCnet/PCI II 79C970A"; /* PCI */
+ fdx = 1;
+ break;
+ case 0x2623:
+ chipname = "PCnet/FAST 79C971"; /* PCI */
+ fdx = 1; mii = 1; fset = 1;
+ ltint = 1;
+ break;
+ case 0x2624:
+ chipname = "PCnet/FAST+ 79C972"; /* PCI */
+ fdx = 1; mii = 1; fset = 1;
+ break;
+ case 0x2625:
+ chipname = "PCnet/FAST III 79C973"; /* PCI */
+ fdx = 1; mii = 1;
+ break;
+ case 0x2626:
+ chipname = "PCnet/Home 79C978"; /* PCI */
+ fdx = 1;
+ /*
+ * This is based on specs published at www.amd.com. This section
+ * assumes that a card with a 79C978 wants to go into 1Mb HomePNA
+ * mode. The 79C978 can also go into standard ethernet, and there
+ * probably should be some sort of module option to select the
+ * mode by which the card should operate
+ */
+ /* switch to home wiring mode */
+ media = a->read_bcr(ioaddr, 49);
+#if 0
+ if (pcnet32_debug > 2)
+ printk(KERN_DEBUG PFX "media value %#x.\n", media);
+ media &= ~3;
+ media |= 1;
+#endif
+ if (pcnet32_debug > 2)
+ printk(KERN_DEBUG PFX "media reset to %#x.\n", media);
+ a->write_bcr(ioaddr, 49, media);
+ break;
+ case 0x2627:
+ chipname = "PCnet/FAST III 79C975"; /* PCI */
+ fdx = 1; mii = 1;
+ break;
+ default:
+ printk(KERN_INFO PFX "PCnet version %#x, no PCnet32 chip.\n",
+ chip_version);
+ return -ENODEV;
+ }
+
+ /*
+ * On selected chips turn on the BCR18:NOUFLO bit. This stops transmit
+ * starting until the packet is loaded. Strike one for reliability, lose
+ * one for latency - although on PCI this isn't a big loss. Older chips
+ * have FIFO's smaller than a packet, so you can't do this.
+ */
+
+ if(fset)
+ {
+ a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0800));
+ a->write_csr(ioaddr, 80, (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00);
+ dxsuflo = 1;
+ ltint = 1;
+ }
+
+ dev = alloc_etherdev(0);
+ if(!dev)
+ return -ENOMEM;
+
+ printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr);
+
+ /* In most chips, after a chip reset, the ethernet address is read from the
+ * station address PROM at the base address and programmed into the
+ * "Physical Address Registers" CSR12-14.
+ * As a precautionary measure, we read the PROM values and complain if
+ * they disagree with the CSRs. Either way, we use the CSR values, and
+ * double check that they are valid.
+ */
+ for (i = 0; i < 3; i++) {
+ unsigned int val;
+ val = a->read_csr(ioaddr, i+12) & 0x0ffff;
+ /* There may be endianness issues here. */
+ dev->dev_addr[2*i] = val & 0x0ff;
+ dev->dev_addr[2*i+1] = (val >> 8) & 0x0ff;
+ }
+
+ /* read PROM address and compare with CSR address */
+ for (i = 0; i < 6; i++)
+ promaddr[i] = inb(ioaddr + i);
+
+ if( memcmp( promaddr, dev->dev_addr, 6)
+ || !is_valid_ether_addr(dev->dev_addr) ) {
+#ifndef __powerpc__
+ if( is_valid_ether_addr(promaddr) ){
+#else
+ if( !is_valid_ether_addr(dev->dev_addr)
+ && is_valid_ether_addr(promaddr)) {
+#endif
+ printk(" warning: CSR address invalid,\n");
+ printk(KERN_INFO " using instead PROM address of");
+ memcpy(dev->dev_addr, promaddr, 6);
+ }
+ }
+
+ /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
+ if( !is_valid_ether_addr(dev->dev_addr) )
+ memset(dev->dev_addr, 0, sizeof(dev->dev_addr));
+
+ for (i = 0; i < 6; i++)
+ printk(" %2.2x", dev->dev_addr[i] );
+
+ if (((chip_version + 1) & 0xfffe) == 0x2624) { /* Version 0x2623 or 0x2624 */
+ i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */
+ printk("\n" KERN_INFO " tx_start_pt(0x%04x):",i);
+ switch(i>>10) {
+ case 0: printk(" 20 bytes,"); break;
+ case 1: printk(" 64 bytes,"); break;
+ case 2: printk(" 128 bytes,"); break;
+ case 3: printk("~220 bytes,"); break;
+ }
+ i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */
+ printk(" BCR18(%x):",i&0xffff);
+ if (i & (1<<5)) printk("BurstWrEn ");
+ if (i & (1<<6)) printk("BurstRdEn ");
+ if (i & (1<<7)) printk("DWordIO ");
+ if (i & (1<<11)) printk("NoUFlow ");
+ i = a->read_bcr(ioaddr, 25);
+ printk("\n" KERN_INFO " SRAMSIZE=0x%04x,",i<<8);
+ i = a->read_bcr(ioaddr, 26);
+ printk(" SRAM_BND=0x%04x,",i<<8);
+ i = a->read_bcr(ioaddr, 27);
+ if (i & (1<<14)) printk("LowLatRx");
+ }
+
+ dev->base_addr = ioaddr;
+ if (request_region(ioaddr, PCNET32_TOTAL_SIZE, chipname) == NULL)
+ return -EBUSY;
+
+ /* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */
+ if ((lp = pci_alloc_consistent(pdev, sizeof(*lp), &lp_dma_addr)) == NULL) {
+ release_region(ioaddr, PCNET32_TOTAL_SIZE);
+ return -ENOMEM;
+ }
+
+ memset(lp, 0, sizeof(*lp));
+ lp->dma_addr = lp_dma_addr;
+ lp->pci_dev = pdev;
+
+ spin_lock_init(&lp->lock);
+
+ dev->priv = lp;
+ lp->name = chipname;
+ lp->shared_irq = shared;
+ lp->mii_if.full_duplex = fdx;
+ lp->dxsuflo = dxsuflo;
+ lp->ltint = ltint;
+ lp->mii = mii;
+ if ((cards_found >= MAX_UNITS) || (options[cards_found] > sizeof(options_mapping)))
+ lp->options = PCNET32_PORT_ASEL;
+ else
+ lp->options = options_mapping[options[cards_found]];
+ lp->mii_if.dev = dev;
+ lp->mii_if.mdio_read = mdio_read;
+ lp->mii_if.mdio_write = mdio_write;
+
+ if (fdx && !(lp->options & PCNET32_PORT_ASEL) &&
+ ((cards_found>=MAX_UNITS) || full_duplex[cards_found]))
+ lp->options |= PCNET32_PORT_FD;
+
+ if (!a) {
+ printk(KERN_ERR PFX "No access methods\n");
+ pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
+ release_region(ioaddr, PCNET32_TOTAL_SIZE);
+ return -ENODEV;
+ }
+ lp->a = *a;
+
+ /* detect special T1/E1 WAN card by checking for MAC address */
+ if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0 && dev->dev_addr[2] == 0x75)
+ lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI;
+
+ lp->init_block.mode = le16_to_cpu(0x0003); /* Disable Rx and Tx. */
+ lp->init_block.tlen_rlen = le16_to_cpu(TX_RING_LEN_BITS | RX_RING_LEN_BITS);
+ for (i = 0; i < 6; i++)
+ lp->init_block.phys_addr[i] = dev->dev_addr[i];
+ lp->init_block.filter[0] = 0x00000000;
+ lp->init_block.filter[1] = 0x00000000;
+ lp->init_block.rx_ring = (u32)le32_to_cpu(lp->dma_addr + offsetof(struct pcnet32_private, rx_ring));
+ lp->init_block.tx_ring = (u32)le32_to_cpu(lp->dma_addr + offsetof(struct pcnet32_private, tx_ring));
+
+ /* switch pcnet32 to 32bit mode */
+ a->write_bcr (ioaddr, 20, 2);
+
+ a->write_csr (ioaddr, 1, (lp->dma_addr + offsetof(struct pcnet32_private, init_block)) & 0xffff);
+ a->write_csr (ioaddr, 2, (lp->dma_addr + offsetof(struct pcnet32_private, init_block)) >> 16);
+
+ if (irq_line) {
+ dev->irq = irq_line;
+ }
+
+ if (dev->irq >= 2)
+ printk(" assigned IRQ %d.\n", dev->irq);
+ else {
+ unsigned long irq_mask = probe_irq_on();
+
+ /*
+ * To auto-IRQ we enable the initialization-done and DMA error
+ * interrupts. For ISA boards we get a DMA error, but VLB and PCI
+ * boards will work.
+ */
+ /* Trigger an initialization just for the interrupt. */
+ a->write_csr (ioaddr, 0, 0x41);
+ mdelay (1);
+
+ dev->irq = probe_irq_off (irq_mask);
+ if (dev->irq)
+ printk(", probed IRQ %d.\n", dev->irq);
+ else {
+ printk(", failed to detect IRQ line.\n");
+ pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
+ release_region(ioaddr, PCNET32_TOTAL_SIZE);
+ return -ENODEV;
+ }
+ }
+
+
+ /* The PCNET32-specific entries in the device structure. */
+ dev->open = &pcnet32_open;
+ dev->hard_start_xmit = &pcnet32_start_xmit;
+ dev->stop = &pcnet32_close;
+ dev->get_stats = &pcnet32_get_stats;
+ dev->set_multicast_list = &pcnet32_set_multicast_list;
+#if 0
+ dev->do_ioctl = &pcnet32_ioctl;
+#endif
+ dev->tx_timeout = pcnet32_tx_timeout;
+ dev->watchdog_timeo = (5*HZ);
+
+ lp->next = pcnet32_dev;
+ pcnet32_dev = dev;
+
+ /* Fill in the generic fields of the device structure. */
+ register_netdev(dev);
+ printk(KERN_INFO "%s: registered as %s\n",dev->name, lp->name);
+ cards_found++;
+
+ alert_slow_netdevice(dev, "PCnet32/lance");
+
+ return 0;
+}
+
+
+static int
+pcnet32_open(struct net_device *dev)
+{
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long ioaddr = dev->base_addr;
+ u16 val;
+ int i;
+
+ if (dev->irq == 0 ||
+ request_irq(dev->irq, &pcnet32_interrupt,
+ lp->shared_irq ? SA_SHIRQ : 0, lp->name, (void *)dev)) {
+ return -EAGAIN;
+ }
+
+ /* Check for a valid station address */
+ if( !is_valid_ether_addr(dev->dev_addr) )
+ return -EINVAL;
+
+ /* Reset the PCNET32 */
+ lp->a.reset (ioaddr);
+
+ /* switch pcnet32 to 32bit mode */
+ lp->a.write_bcr (ioaddr, 20, 2);
+
+ if (pcnet32_debug > 1)
+ printk(KERN_DEBUG "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n",
+ dev->name, dev->irq,
+ (u32) (lp->dma_addr + offsetof(struct pcnet32_private, tx_ring)),
+ (u32) (lp->dma_addr + offsetof(struct pcnet32_private, rx_ring)),
+ (u32) (lp->dma_addr + offsetof(struct pcnet32_private, init_block)));
+
+ /* set/reset autoselect bit */
+ val = lp->a.read_bcr (ioaddr, 2) & ~2;
+ if (lp->options & PCNET32_PORT_ASEL)
+ val |= 2;
+ lp->a.write_bcr (ioaddr, 2, val);
+
+ /* handle full duplex setting */
+ if (lp->mii_if.full_duplex) {
+ val = lp->a.read_bcr (ioaddr, 9) & ~3;
+ if (lp->options & PCNET32_PORT_FD) {
+ val |= 1;
+ if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
+ val |= 2;
+ } else if (lp->options & PCNET32_PORT_ASEL) {
+ /* workaround of xSeries250, turn on for 79C975 only */
+ i = ((lp->a.read_csr(ioaddr, 88) | (lp->a.read_csr(ioaddr,89) << 16)) >> 12) & 0xffff;
+ if (i == 0x2627) val |= 3;
+ }
+ lp->a.write_bcr (ioaddr, 9, val);
+ }
+
+ /* set/reset GPSI bit in test register */
+ val = lp->a.read_csr (ioaddr, 124) & ~0x10;
+ if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
+ val |= 0x10;
+ lp->a.write_csr (ioaddr, 124, val);
+
+ if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
+ val = lp->a.read_bcr (ioaddr, 32) & ~0x38; /* disable Auto Negotiation, set 10Mpbs, HD */
+ if (lp->options & PCNET32_PORT_FD)
+ val |= 0x10;
+ if (lp->options & PCNET32_PORT_100)
+ val |= 0x08;
+ lp->a.write_bcr (ioaddr, 32, val);
+ } else {
+ if (lp->options & PCNET32_PORT_ASEL) { /* enable auto negotiate, setup, disable fd */
+ val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
+ val |= 0x20;
+ lp->a.write_bcr(ioaddr, 32, val);
+ }
+ }
+
+#ifdef DO_DXSUFLO
+ if (lp->dxsuflo) { /* Disable transmit stop on underflow */
+ val = lp->a.read_csr (ioaddr, 3);
+ val |= 0x40;
+ lp->a.write_csr (ioaddr, 3, val);
+ }
+#endif
+
+ if (lp->ltint) { /* Enable TxDone-intr inhibitor */
+ val = lp->a.read_csr (ioaddr, 5);
+ val |= (1<<14);
+ lp->a.write_csr (ioaddr, 5, val);
+ }
+
+ lp->init_block.mode = le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
+ lp->init_block.filter[0] = 0x00000000;
+ lp->init_block.filter[1] = 0x00000000;
+ if (pcnet32_init_ring(dev))
+ return -ENOMEM;
+
+ /* Re-initialize the PCNET32, and start it when done. */
+ lp->a.write_csr (ioaddr, 1, (lp->dma_addr + offsetof(struct pcnet32_private, init_block)) &0xffff);
+ lp->a.write_csr (ioaddr, 2, (lp->dma_addr + offsetof(struct pcnet32_private, init_block)) >> 16);
+
+ lp->a.write_csr (ioaddr, 4, 0x0915);
+ lp->a.write_csr (ioaddr, 0, 0x0001);
+
+ netif_start_queue(dev);
+
+ i = 0;
+ while (i++ < 100)
+ if (lp->a.read_csr (ioaddr, 0) & 0x0100)
+ break;
+ /*
+ * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
+ * reports that doing so triggers a bug in the '974.
+ */
+ lp->a.write_csr (ioaddr, 0, 0x0042);
+
+ if (pcnet32_debug > 2)
+ printk(KERN_DEBUG "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
+ dev->name, i, (u32) (lp->dma_addr + offsetof(struct pcnet32_private, init_block)),
+ lp->a.read_csr(ioaddr, 0));
+
+
+ MOD_INC_USE_COUNT;
+
+ return 0; /* Always succeed */
+}
+
+/*
+ * The LANCE has been halted for one reason or another (busmaster memory
+ * arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
+ * etc.). Modern LANCE variants always reload their ring-buffer
+ * configuration when restarted, so we must reinitialize our ring
+ * context before restarting. As part of this reinitialization,
+ * find all packets still on the Tx ring and pretend that they had been
+ * sent (in effect, drop the packets on the floor) - the higher-level
+ * protocols will time out and retransmit. It'd be better to shuffle
+ * these skbs to a temp list and then actually re-Tx them after
+ * restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
+ */
+
+static void
+pcnet32_purge_tx_ring(struct net_device *dev)
+{
+ struct pcnet32_private *lp = dev->priv;
+ int i;
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (lp->tx_skbuff[i]) {
+ pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], lp->tx_skbuff[i]->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(lp->tx_skbuff[i]);
+ lp->tx_skbuff[i] = NULL;
+ lp->tx_dma_addr[i] = 0;
+ }
+ }
+}
+
+
+/* Initialize the PCNET32 Rx and Tx rings.
+ * Allocates (when needed) and DMA-maps one PKT_BUF_SZ receive skb per
+ * Rx descriptor, hands every Rx descriptor to the chip (0x8000
+ * ownership bit), clears the Tx ring, and rewrites the init-block ring
+ * pointers.  Returns 0 on success, -1 if an skb allocation fails. */
+static int
+pcnet32_init_ring(struct net_device *dev)
+{
+    struct pcnet32_private *lp = dev->priv;
+    int i;
+
+    lp->tx_full = 0;
+    lp->cur_rx = lp->cur_tx = 0;
+    lp->dirty_rx = lp->dirty_tx = 0;
+
+    for (i = 0; i < RX_RING_SIZE; i++) {
+        struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
+        if (rx_skbuff == NULL) {
+            if (!(rx_skbuff = lp->rx_skbuff[i] = dev_alloc_skb (PKT_BUF_SZ))) {
+                /* there is not much we can do at this point */
+                printk(KERN_ERR "%s: pcnet32_init_ring dev_alloc_skb failed.\n",dev->name);
+                return -1;
+            }
+            skb_reserve (rx_skbuff, 2);
+        }
+        /* Map the usable buffer area (PKT_BUF_SZ minus the 2-byte
+         * alignment reserve).  A fresh skb has skb->len == 0, so the
+         * old code's pci_map_single(..., rx_skbuff->len, ...) mapped
+         * zero bytes. */
+        lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev, rx_skbuff->tail,
+                                            PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE);
+        lp->rx_ring[i].base = (u32)le32_to_cpu(lp->rx_dma_addr[i]);
+        lp->rx_ring[i].buf_length = le16_to_cpu(-PKT_BUF_SZ);
+        lp->rx_ring[i].status = le16_to_cpu(0x8000);    /* chip owns it */
+    }
+    /* The Tx buffer address is filled in as needed, but we do need to clear
+       the upper ownership bit. */
+    for (i = 0; i < TX_RING_SIZE; i++) {
+        lp->tx_ring[i].base = 0;
+        lp->tx_ring[i].status = 0;
+        lp->tx_dma_addr[i] = 0;
+    }
+
+    lp->init_block.tlen_rlen = le16_to_cpu(TX_RING_LEN_BITS | RX_RING_LEN_BITS);
+    for (i = 0; i < 6; i++)
+        lp->init_block.phys_addr[i] = dev->dev_addr[i];
+    lp->init_block.rx_ring = (u32)le32_to_cpu(lp->dma_addr + offsetof(struct pcnet32_private, rx_ring));
+    lp->init_block.tx_ring = (u32)le32_to_cpu(lp->dma_addr + offsetof(struct pcnet32_private, tx_ring));
+    return 0;
+}
+
+/* Purge the Tx ring, rebuild both rings, re-run chip initialization
+ * (CSR0 bit 0, INIT), poll up to 100 times for init-done (0x0100,
+ * IDON), then write the caller-supplied CSR0 bits (e.g. 0x0042 =
+ * IENA|STRT) to bring the chip back up.  Callers (tx_timeout,
+ * set_multicast_list, the ISR) hold lp->lock and have already stopped
+ * the chip. */
+static void
+pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
+{
+    struct pcnet32_private *lp = dev->priv;
+    unsigned long ioaddr = dev->base_addr;
+    int i;
+
+    pcnet32_purge_tx_ring(dev);
+    /* If ring rebuild fails (skb allocation), leave the chip stopped. */
+    if (pcnet32_init_ring(dev))
+        return;
+
+    /* ReInit Ring */
+    lp->a.write_csr (ioaddr, 0, 1);
+    i = 0;
+    while (i++ < 100)
+        if (lp->a.read_csr (ioaddr, 0) & 0x0100)
+            break;
+
+    lp->a.write_csr (ioaddr, 0, csr0_bits);
+}
+
+
+/* Transmit watchdog: the transmitter appears wedged.  Stop the chip
+ * (CSR0 = 0x0004, STOP), optionally dump both descriptor rings at
+ * debug level > 2, then reinitialize and restart with 0x0042
+ * (IENA|STRT) and re-enable the Tx queue. */
+static void
+pcnet32_tx_timeout (struct net_device *dev)
+{
+    struct pcnet32_private *lp = dev->priv;
+    unsigned long ioaddr = dev->base_addr, flags;
+
+    spin_lock_irqsave(&lp->lock, flags);
+    /* Transmitter timeout, serious problems. */
+    printk(KERN_ERR "%s: transmit timed out, status %4.4x, resetting.\n",
+           dev->name, lp->a.read_csr(ioaddr, 0));
+    lp->a.write_csr (ioaddr, 0, 0x0004);
+    lp->stats.tx_errors++;
+    if (pcnet32_debug > 2) {
+        int i;
+        printk(KERN_DEBUG " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
+               lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
+               lp->cur_rx);
+        for (i = 0 ; i < RX_RING_SIZE; i++)
+            printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
+                   lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
+                   lp->rx_ring[i].msg_length, (unsigned)lp->rx_ring[i].status);
+        for (i = 0 ; i < TX_RING_SIZE; i++)
+            printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
+                   lp->tx_ring[i].base, -lp->tx_ring[i].length,
+                   lp->tx_ring[i].misc, (unsigned)lp->tx_ring[i].status);
+        printk("\n");
+    }
+    pcnet32_restart(dev, 0x0042);
+
+    dev->trans_start = jiffies;
+    netif_start_queue(dev);
+
+    spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+
+/* Queue one skb for transmission: fill the next Tx descriptor, hand
+ * ownership to the chip (status bit 0x8000 written last), and trigger
+ * a transmit poll (CSR0 = 0x0048).  Returns 0; stops the queue when
+ * the following ring slot is still in use.  Fragmented skbs are not
+ * supported (BUG on nr_frags != 0). */
+static int
+pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+    struct pcnet32_private *lp = dev->priv;
+    unsigned long ioaddr = dev->base_addr;
+    u16 status;
+    int entry;
+    unsigned long flags;
+
+    if (pcnet32_debug > 3) {
+        printk(KERN_DEBUG "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
+               dev->name, lp->a.read_csr(ioaddr, 0));
+    }
+
+    /* Scatter-gather is not handled by this driver. */
+    if (skb_shinfo(skb)->nr_frags != 0)
+        BUG();
+
+    spin_lock_irqsave(&lp->lock, flags);
+
+    /* Default status -- will not enable Successful-TxDone
+     * interrupt when that option is available to us.
+     */
+    status = 0x8300;
+    if ((lp->ltint) &&
+        ((lp->cur_tx - lp->dirty_tx == TX_RING_SIZE/2) ||
+         (lp->cur_tx - lp->dirty_tx >= TX_RING_SIZE-2)))
+    {
+        /* Enable Successful-TxDone interrupt if we have
+         * 1/2 of, or nearly all of, our ring buffer Tx'd
+         * but not yet cleaned up. Thus, most of the time,
+         * we will not enable Successful-TxDone interrupts.
+         */
+        status = 0x9300;
+    }
+
+    /* Fill in a Tx ring entry */
+
+    /* Mask to ring buffer boundary. */
+    entry = lp->cur_tx & TX_RING_MOD_MASK;
+
+    /* Caution: the write order is important here, set the base address
+       with the "ownership" bits last. */
+
+    /* Buffer length is stored as a negative two's-complement value. */
+    lp->tx_ring[entry].length = le16_to_cpu(-skb->len);
+
+    lp->tx_ring[entry].misc = 0x00000000;
+
+    lp->tx_skbuff[entry] = skb;
+    lp->tx_dma_addr[entry] = pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
+    lp->tx_ring[entry].base = (u32)le32_to_cpu(lp->tx_dma_addr[entry]);
+    lp->tx_ring[entry].status = le16_to_cpu(status);
+
+    lp->cur_tx++;
+    lp->stats.tx_bytes += skb->len;
+
+    /* Trigger an immediate send poll. */
+    lp->a.write_csr (ioaddr, 0, 0x0048);
+
+    dev->trans_start = jiffies;
+
+    /* A zero base means the next slot is free; otherwise the ring is
+     * full and the queue must be stopped until the ISR drains it. */
+    if (lp->tx_ring[(entry+1) & TX_RING_MOD_MASK].base == 0)
+        netif_start_queue(dev);
+    else {
+        lp->tx_full = 1;
+        netif_stop_queue(dev);
+    }
+    spin_unlock_irqrestore(&lp->lock, flags);
+    return 0;
+}
+
+/* The PCNET32 interrupt handler.
+ * Loops while CSR0 reports interrupt causes (masked with 0x8600),
+ * bounded by max_interrupt_work: services Rx (0x0400), reaps finished
+ * Tx descriptors (0x0200), counts misc errors, and restarts the chip
+ * after a Tx FIFO error.  The register address port (RAP) is saved on
+ * entry and restored on exit so interrupted CSR accesses elsewhere are
+ * not corrupted. */
+static void
+pcnet32_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+    struct net_device *dev = dev_id;
+    struct pcnet32_private *lp;
+    unsigned long ioaddr;
+    u16 csr0,rap;
+    int boguscnt = max_interrupt_work;
+    int must_restart;
+
+    if (!dev) {
+        printk (KERN_DEBUG "%s(): irq %d for unknown device\n",
+                __FUNCTION__, irq);
+        return;
+    }
+
+    ioaddr = dev->base_addr;
+    lp = dev->priv;
+
+    spin_lock(&lp->lock);
+
+    rap = lp->a.read_rap(ioaddr);
+    while ((csr0 = lp->a.read_csr (ioaddr, 0)) & 0x8600 && --boguscnt >= 0) {
+        /* Acknowledge all of the current interrupt sources ASAP. */
+        lp->a.write_csr (ioaddr, 0, csr0 & ~0x004f);
+
+        must_restart = 0;
+
+        if (pcnet32_debug > 5)
+            printk(KERN_DEBUG "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
+                   dev->name, csr0, lp->a.read_csr (ioaddr, 0));
+
+        if (csr0 & 0x0400)              /* Rx interrupt */
+            pcnet32_rx(dev);
+
+        if (csr0 & 0x0200) {            /* Tx-done interrupt */
+            unsigned int dirty_tx = lp->dirty_tx;
+
+            while (dirty_tx < lp->cur_tx) {
+                int entry = dirty_tx & TX_RING_MOD_MASK;
+                int status = (short)le16_to_cpu(lp->tx_ring[entry].status);
+
+                /* Negative status == ownership bit 0x8000 still set. */
+                if (status < 0)
+                    break;              /* It still hasn't been Txed */
+
+                lp->tx_ring[entry].base = 0;
+
+                if (status & 0x4000) {
+                    /* There was an major error, log it. */
+                    int err_status = le32_to_cpu(lp->tx_ring[entry].misc);
+                    lp->stats.tx_errors++;
+                    if (err_status & 0x04000000) lp->stats.tx_aborted_errors++;
+                    if (err_status & 0x08000000) lp->stats.tx_carrier_errors++;
+                    if (err_status & 0x10000000) lp->stats.tx_window_errors++;
+#ifndef DO_DXSUFLO
+                    if (err_status & 0x40000000) {
+                        lp->stats.tx_fifo_errors++;
+                        /* Ackk! On FIFO errors the Tx unit is turned off! */
+                        /* Remove this verbosity later! */
+                        printk(KERN_ERR "%s: Tx FIFO error! CSR0=%4.4x\n",
+                               dev->name, csr0);
+                        must_restart = 1;
+                    }
+#else
+                    if (err_status & 0x40000000) {
+                        lp->stats.tx_fifo_errors++;
+                        if (! lp->dxsuflo) {  /* If controller doesn't recover ... */
+                            /* Ackk! On FIFO errors the Tx unit is turned off! */
+                            /* Remove this verbosity later! */
+                            printk(KERN_ERR "%s: Tx FIFO error! CSR0=%4.4x\n",
+                                   dev->name, csr0);
+                            must_restart = 1;
+                        }
+                    }
+#endif
+                } else {
+                    if (status & 0x1800)
+                        lp->stats.collisions++;
+                    lp->stats.tx_packets++;
+                }
+
+                /* We must free the original skb */
+                if (lp->tx_skbuff[entry]) {
+                    pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[entry],
+                                     lp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
+                    dev_kfree_skb_irq(lp->tx_skbuff[entry]);
+                    lp->tx_skbuff[entry] = 0;
+                    lp->tx_dma_addr[entry] = 0;
+                }
+                dirty_tx++;
+            }
+
+            if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
+                printk(KERN_ERR "%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+                       dev->name, dirty_tx, lp->cur_tx, lp->tx_full);
+                dirty_tx += TX_RING_SIZE;
+            }
+
+            if (lp->tx_full &&
+                netif_queue_stopped(dev) &&
+                dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
+                /* The ring is no longer full, clear tbusy. */
+                lp->tx_full = 0;
+                netif_wake_queue (dev);
+            }
+            lp->dirty_tx = dirty_tx;
+        }
+
+        /* Log misc errors. */
+        if (csr0 & 0x4000) lp->stats.tx_errors++; /* Tx babble. */
+        if (csr0 & 0x1000) {
+            /*
+             * this happens when our receive ring is full. This shouldn't
+             * be a problem as we will see normal rx interrupts for the frames
+             * in the receive ring. But there are some PCI chipsets (I can reproduce
+             * this on SP3G with Intel saturn chipset) which have sometimes problems
+             * and will fill up the receive ring with error descriptors. In this
+             * situation we don't get a rx interrupt, but a missed frame interrupt sooner
+             * or later. So we try to clean up our receive ring here.
+             */
+            pcnet32_rx(dev);
+            lp->stats.rx_errors++; /* Missed a Rx frame. */
+        }
+        if (csr0 & 0x0800) {
+            printk(KERN_ERR "%s: Bus master arbitration failure, status %4.4x.\n",
+                   dev->name, csr0);
+            /* unlike for the lance, there is no restart needed */
+        }
+
+        if (must_restart) {
+            /* stop the chip to clear the error condition, then restart */
+            lp->a.write_csr (ioaddr, 0, 0x0004);
+            pcnet32_restart(dev, 0x0002);
+        }
+    }
+
+    /* Clear any other interrupt, and set interrupt enable. */
+    lp->a.write_csr (ioaddr, 0, 0x7940);
+    lp->a.write_rap (ioaddr,rap);
+
+    if (pcnet32_debug > 4)
+        printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
+               dev->name, lp->a.read_csr (ioaddr, 0));
+
+    spin_unlock(&lp->lock);
+}
+
+/* Receive-ring service loop: consume every descriptor the chip has
+ * handed back (ownership bit clear), deliver good frames to the stack
+ * (copying small ones per rx_copybreak, swapping buffers for large
+ * ones), count errors, and re-arm each descriptor.  Called from the
+ * interrupt handler; always returns 0.
+ * Fix: DMA map/unmap calls now use the mapped buffer size
+ * (PKT_BUF_SZ-2) instead of skb->len / newskb->len, which are 0 at
+ * those points. */
+static int
+pcnet32_rx(struct net_device *dev)
+{
+    struct pcnet32_private *lp = dev->priv;
+    int entry = lp->cur_rx & RX_RING_MOD_MASK;
+
+    /* If we own the next entry, it's a new packet. Send it up. */
+    while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) {
+        int status = (short)le16_to_cpu(lp->rx_ring[entry].status) >> 8;
+
+        if (status != 0x03) {           /* There was an error. */
+            /*
+             * There is a tricky error noted by John Murphy,
+             * <murf@perftech.com> to Russ Nelson: Even with full-sized
+             * buffers it's possible for a jabber packet to use two
+             * buffers, with only the last correctly noting the error.
+             */
+            if (status & 0x01)          /* Only count a general error at the */
+                lp->stats.rx_errors++;  /* end of a packet.*/
+            if (status & 0x20) lp->stats.rx_frame_errors++;
+            if (status & 0x10) lp->stats.rx_over_errors++;
+            if (status & 0x08) lp->stats.rx_crc_errors++;
+            if (status & 0x04) lp->stats.rx_fifo_errors++;
+            lp->rx_ring[entry].status &= le16_to_cpu(0x03ff);
+        } else {
+            /* Malloc up new buffer, compatible with net-2e. */
+            short pkt_len = (le32_to_cpu(lp->rx_ring[entry].msg_length) & 0xfff)-4;
+            struct sk_buff *skb;
+
+            if(pkt_len < 60) {
+                printk(KERN_ERR "%s: Runt packet!\n",dev->name);
+                lp->stats.rx_errors++;
+            } else {
+                int rx_in_place = 0;
+
+                if (pkt_len > rx_copybreak) {
+                    struct sk_buff *newskb;
+
+                    if ((newskb = dev_alloc_skb (PKT_BUF_SZ))) {
+                        skb_reserve (newskb, 2);
+                        skb = lp->rx_skbuff[entry];
+                        /* Unmap with the size the buffer was mapped
+                         * with; skb->len is still 0 here. */
+                        pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[entry],
+                                         PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE);
+                        skb_put (skb, pkt_len);
+                        lp->rx_skbuff[entry] = newskb;
+                        newskb->dev = dev;
+                        /* Map the full usable buffer, not newskb->len
+                         * (0 for a fresh skb). */
+                        lp->rx_dma_addr[entry] =
+                            pci_map_single(lp->pci_dev, newskb->tail,
+                                           PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE);
+                        lp->rx_ring[entry].base = le32_to_cpu(lp->rx_dma_addr[entry]);
+                        rx_in_place = 1;
+                    } else
+                        skb = NULL;
+                } else {
+                    skb = dev_alloc_skb(pkt_len+2);
+                }
+
+                if (skb == NULL) {
+                    int i;
+                    printk(KERN_ERR "%s: Memory squeeze, deferring packet.\n", dev->name);
+                    /* Drop only when nearly every descriptor is
+                     * already owned by the chip. */
+                    for (i = 0; i < RX_RING_SIZE; i++)
+                        if ((short)le16_to_cpu(lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].status) < 0)
+                            break;
+
+                    if (i > RX_RING_SIZE -2) {
+                        lp->stats.rx_dropped++;
+                        lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
+                        lp->cur_rx++;
+                    }
+                    break;
+                }
+                skb->dev = dev;
+                if (!rx_in_place) {
+                    skb_reserve(skb,2); /* 16 byte align */
+                    skb_put(skb,pkt_len);       /* Make room */
+                    eth_copy_and_sum(skb,
+                                     (unsigned char *)(lp->rx_skbuff[entry]->tail),
+                                     pkt_len,0);
+                }
+                lp->stats.rx_bytes += skb->len;
+                skb->protocol=eth_type_trans(skb,dev);
+                netif_rx(skb);
+                dev->last_rx = jiffies;
+                lp->stats.rx_packets++;
+            }
+        }
+        /*
+         * The docs say that the buffer length isn't touched, but Andrew Boyd
+         * of QNX reports that some revs of the 79C965 clear it.
+         */
+        lp->rx_ring[entry].buf_length = le16_to_cpu(-PKT_BUF_SZ);
+        lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
+        entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
+    }
+
+    return 0;
+}
+
+/* Shut the interface down: stop the chip, switch it back to 16-bit
+ * mode for warm-reboot compatibility, release the IRQ, and free all
+ * Rx/Tx buffers after unmapping their DMA addresses.
+ * Fix: Rx buffers are mapped with PKT_BUF_SZ-2 bytes, so they are
+ * unmapped with the same size rather than skb->len. */
+static int
+pcnet32_close(struct net_device *dev)
+{
+    unsigned long ioaddr = dev->base_addr;
+    struct pcnet32_private *lp = dev->priv;
+    int i;
+
+    netif_stop_queue(dev);
+
+    /* CSR112 holds the missed-frame count. */
+    lp->stats.rx_missed_errors = lp->a.read_csr (ioaddr, 112);
+
+    if (pcnet32_debug > 1)
+        printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
+               dev->name, lp->a.read_csr (ioaddr, 0));
+
+    /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
+    lp->a.write_csr (ioaddr, 0, 0x0004);
+
+    /*
+     * Switch back to 16bit mode to avoid problems with dumb
+     * DOS packet driver after a warm reboot
+     */
+    lp->a.write_bcr (ioaddr, 20, 4);
+
+    free_irq(dev->irq, dev);
+
+    /* free all allocated skbuffs */
+    for (i = 0; i < RX_RING_SIZE; i++) {
+        lp->rx_ring[i].status = 0;
+        if (lp->rx_skbuff[i]) {
+            pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE);
+            dev_kfree_skb(lp->rx_skbuff[i]);
+        }
+        lp->rx_skbuff[i] = NULL;
+        lp->rx_dma_addr[i] = 0;
+    }
+
+    for (i = 0; i < TX_RING_SIZE; i++) {
+        if (lp->tx_skbuff[i]) {
+            /* Tx skbs carry real data; skb->len is the mapped size. */
+            pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], lp->tx_skbuff[i]->len, PCI_DMA_TODEVICE);
+            dev_kfree_skb(lp->tx_skbuff[i]);
+        }
+        lp->tx_skbuff[i] = NULL;
+        lp->tx_dma_addr[i] = 0;
+    }
+
+    MOD_DEC_USE_COUNT;
+
+    return 0;
+}
+
+/* Return the device statistics, refreshing the missed-frame count from
+ * CSR112.  Reading a CSR clobbers the register address port (RAP), so
+ * it is saved and restored under the device lock. */
+static struct net_device_stats *
+pcnet32_get_stats(struct net_device *dev)
+{
+    struct pcnet32_private *lp = dev->priv;
+    unsigned long ioaddr = dev->base_addr;
+    unsigned long flags;
+    u16 prev_rap;
+
+    spin_lock_irqsave(&lp->lock, flags);
+    prev_rap = lp->a.read_rap(ioaddr);
+    lp->stats.rx_missed_errors = lp->a.read_csr (ioaddr, 112);
+    lp->a.write_rap(ioaddr, prev_rap);
+    spin_unlock_irqrestore(&lp->lock, flags);
+
+    return &lp->stats;
+}
+
+/* taken from the sunlance driver, which it took from the depca driver */
+/* Build the 64-bit multicast hash filter in the init block: all ones
+ * for IFF_ALLMULTI, otherwise one bit per multicast list entry,
+ * selected by the top 6 bits of the little-endian CRC of the
+ * address. */
+static void pcnet32_load_multicast (struct net_device *dev)
+{
+    struct pcnet32_private *lp = dev->priv;
+    volatile struct pcnet32_init_block *ib = &lp->init_block;
+    volatile u16 *mcast_table = (u16 *)&ib->filter;
+    struct dev_mc_list *dmi=dev->mc_list;
+    char *addrs;
+    int i;
+    u32 crc;
+
+    /* set all multicast bits */
+    if (dev->flags & IFF_ALLMULTI){
+        ib->filter[0] = 0xffffffff;
+        ib->filter[1] = 0xffffffff;
+        return;
+    }
+    /* clear the multicast filter */
+    ib->filter[0] = 0;
+    ib->filter[1] = 0;
+
+    /* Add addresses */
+    for (i = 0; i < dev->mc_count; i++){
+        addrs = dmi->dmi_addr;
+        dmi = dmi->next;
+
+        /* multicast address? (group bit of the first octet) */
+        if (!(*addrs & 1))
+            continue;
+
+        crc = ether_crc_le(6, addrs);
+        crc = crc >> 26;        /* keep the upper 6 CRC bits */
+        /* 4 x 16-bit words; word index = crc >> 4, bit = crc & 0xf */
+        mcast_table [crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
+    }
+    return;
+}
+
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ * Rewrites init_block.mode (promiscuous bit 0x8000 plus the
+ * port-select bits) and the hash filter, then stops and restarts the
+ * chip so the updated init block is re-read.
+ */
+static void pcnet32_set_multicast_list(struct net_device *dev)
+{
+    unsigned long ioaddr = dev->base_addr, flags;
+    struct pcnet32_private *lp = dev->priv;
+
+    spin_lock_irqsave(&lp->lock, flags);
+    if (dev->flags&IFF_PROMISC) {
+        /* Log any net taps. */
+        printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
+        lp->init_block.mode = le16_to_cpu(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) << 7);
+    } else {
+        lp->init_block.mode = le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
+        pcnet32_load_multicast (dev);
+    }
+
+    lp->a.write_csr (ioaddr, 0, 0x0004); /* Temporarily stop the lance. */
+
+    pcnet32_restart(dev, 0x0042); /*  Resume normal operation */
+    spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+/* Read an MII PHY register through BCR33 (address) / BCR34 (data),
+ * restoring the previously selected PHY address afterwards.
+ * Returns 0 when the chip has no MII interface. */
+static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
+{
+    struct pcnet32_private *lp = dev->priv;
+    unsigned long ioaddr = dev->base_addr;
+    int saved_addr;
+    u16 value;
+
+    if (!lp->mii)
+        return 0;
+
+    saved_addr = lp->a.read_bcr(ioaddr, 33);
+    lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
+    value = lp->a.read_bcr(ioaddr, 34);
+    lp->a.write_bcr(ioaddr, 33, saved_addr);
+
+    return value;
+}
+
+/* Write an MII PHY register through BCR33 (address) / BCR34 (data),
+ * restoring the previously selected PHY address afterwards.
+ * No-op when the chip has no MII interface. */
+static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
+{
+    struct pcnet32_private *lp = dev->priv;
+    unsigned long ioaddr = dev->base_addr;
+    int saved_addr;
+
+    if (!lp->mii)
+        return;
+
+    saved_addr = lp->a.read_bcr(ioaddr, 33);
+    lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
+    lp->a.write_bcr(ioaddr, 34, val);
+    lp->a.write_bcr(ioaddr, 33, saved_addr);
+}
+
+#if 0
+/* ethtool sub-ioctl dispatcher (compiled out by the surrounding
+ * #if 0): driver info, MII settings get/set, NWay restart, link
+ * status, and debug message-level get/set.
+ * Fix: the copy_from_user() argument had been mangled to "ðcmd"
+ * (HTML-entity corruption of "&eth"); restored to "&ethcmd". */
+static int pcnet32_ethtool_ioctl (struct net_device *dev, void *useraddr)
+{
+    struct pcnet32_private *lp = dev->priv;
+    u32 ethcmd;
+    int phyaddr = 0;
+    int phy_id = 0;
+    unsigned long ioaddr = dev->base_addr;
+
+    if (lp->mii) {
+        phyaddr = lp->a.read_bcr (ioaddr, 33);
+        phy_id = (phyaddr >> 5) & 0x1f;
+        lp->mii_if.phy_id = phy_id;
+    }
+
+    if (copy_from_user (&ethcmd, useraddr, sizeof (ethcmd)))
+        return -EFAULT;
+
+    switch (ethcmd) {
+    case ETHTOOL_GDRVINFO: {
+        struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
+        strcpy (info.driver, DRV_NAME);
+        strcpy (info.version, DRV_VERSION);
+        if (lp->pci_dev)
+            strcpy (info.bus_info, lp->pci_dev->slot_name);
+        else
+            sprintf(info.bus_info, "VLB 0x%lx", dev->base_addr);
+        if (copy_to_user (useraddr, &info, sizeof (info)))
+            return -EFAULT;
+        return 0;
+    }
+
+    /* get settings */
+    case ETHTOOL_GSET: {
+        struct ethtool_cmd ecmd = { ETHTOOL_GSET };
+        spin_lock_irq(&lp->lock);
+        mii_ethtool_gset(&lp->mii_if, &ecmd);
+        spin_unlock_irq(&lp->lock);
+        if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
+            return -EFAULT;
+        return 0;
+    }
+    /* set settings */
+    case ETHTOOL_SSET: {
+        int r;
+        struct ethtool_cmd ecmd;
+        if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
+            return -EFAULT;
+        spin_lock_irq(&lp->lock);
+        r = mii_ethtool_sset(&lp->mii_if, &ecmd);
+        spin_unlock_irq(&lp->lock);
+        return r;
+    }
+    /* restart autonegotiation */
+    case ETHTOOL_NWAY_RST: {
+        return mii_nway_restart(&lp->mii_if);
+    }
+    /* get link status */
+    case ETHTOOL_GLINK: {
+        struct ethtool_value edata = {ETHTOOL_GLINK};
+        edata.data = mii_link_ok(&lp->mii_if);
+        if (copy_to_user(useraddr, &edata, sizeof(edata)))
+            return -EFAULT;
+        return 0;
+    }
+
+    /* get message-level */
+    case ETHTOOL_GMSGLVL: {
+        struct ethtool_value edata = {ETHTOOL_GMSGLVL};
+        edata.data = pcnet32_debug;
+        if (copy_to_user(useraddr, &edata, sizeof(edata)))
+            return -EFAULT;
+        return 0;
+    }
+    /* set message-level */
+    case ETHTOOL_SMSGLVL: {
+        struct ethtool_value edata;
+        if (copy_from_user(&edata, useraddr, sizeof(edata)))
+            return -EFAULT;
+        pcnet32_debug = edata.data;
+        return 0;
+    }
+    default:
+        break;
+    }
+
+    return -EOPNOTSUPP;
+}
+
+/* Legacy SIOC{G,S}MII ioctls plus SIOCETHTOOL dispatch (compiled out
+ * by the surrounding #if 0).  MII register access goes through BCR33
+ * (address) / BCR34 (data); the previously selected PHY address is
+ * restored afterwards. */
+static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+    unsigned long ioaddr = dev->base_addr;
+    struct pcnet32_private *lp = dev->priv;
+    struct mii_ioctl_data *data = (struct mii_ioctl_data *)&rq->ifr_data;
+    int phyaddr = lp->a.read_bcr (ioaddr, 33);
+
+    if (cmd == SIOCETHTOOL)
+        return pcnet32_ethtool_ioctl(dev, (void *) rq->ifr_data);
+
+    if (lp->mii) {
+        switch(cmd) {
+        case SIOCGMIIPHY:               /* Get address of MII PHY in use. */
+            data->phy_id = (phyaddr >> 5) & 0x1f;
+            /* Fall Through */
+        case SIOCGMIIREG:               /* Read MII PHY register. */
+            lp->a.write_bcr (ioaddr, 33, ((data->phy_id & 0x1f) << 5) | (data->reg_num & 0x1f));
+            data->val_out = lp->a.read_bcr (ioaddr, 34);
+            lp->a.write_bcr (ioaddr, 33, phyaddr);
+            return 0;
+        case SIOCSMIIREG:               /* Write MII PHY register. */
+            if (!capable(CAP_NET_ADMIN))
+                return -EPERM;
+            lp->a.write_bcr (ioaddr, 33, ((data->phy_id & 0x1f) << 5) | (data->reg_num & 0x1f));
+            lp->a.write_bcr (ioaddr, 34, data->val_in);
+            lp->a.write_bcr (ioaddr, 33, phyaddr);
+            return 0;
+        default:
+            return -EOPNOTSUPP;
+        }
+    }
+    return -EOPNOTSUPP;
+}
+#endif
+
+/* PCI driver glue (GNU old-style label: initializers).  Probe only;
+ * devices are torn down in pcnet32_cleanup_module. */
+static struct pci_driver pcnet32_driver = {
+    name: DRV_NAME,
+    probe: pcnet32_probe_pci,
+    id_table: pcnet32_pci_tbl,
+};
+
+/* Module parameters (Linux 2.4 MODULE_PARM interface). */
+MODULE_PARM(debug, "i");
+MODULE_PARM_DESC(debug, DRV_NAME " debug level (0-6)");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM_DESC(max_interrupt_work, DRV_NAME " maximum events handled per interrupt");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM_DESC(rx_copybreak, DRV_NAME " copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM(tx_start_pt, "i");
+MODULE_PARM_DESC(tx_start_pt, DRV_NAME " transmit start point (0-3)");
+MODULE_PARM(pcnet32vlb, "i");
+MODULE_PARM_DESC(pcnet32vlb, DRV_NAME " Vesa local bus (VLB) support (0/1)");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM_DESC(options, DRV_NAME " initial option setting(s) (0-15)");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM_DESC(full_duplex, DRV_NAME " full duplex setting(s) (1)");
+
+MODULE_AUTHOR("Thomas Bogendoerfer");
+MODULE_DESCRIPTION("Driver for PCnet32 and PCnetPCI based ethercards");
+MODULE_LICENSE("GPL");
+
+/* An additional parameter that may be passed in... */
+static int debug = -1;          /* <= 0 leaves pcnet32_debug unchanged */
+static int tx_start_pt = -1;    /* outside 0..3 keeps the default tx_start */
+
+/* Module load: apply debug/tx-start overrides, register the PCI driver
+ * (which probes PCI cards), then optionally probe VL-bus cards.
+ * Succeeds iff at least one card was found.
+ * NOTE(review): the pci_module_init() return value is ignored; load
+ * success is judged solely by cards_found — confirm this is intended. */
+static int __init pcnet32_init_module(void)
+{
+    if (debug > 0)
+        pcnet32_debug = debug;
+
+    if ((tx_start_pt >= 0) && (tx_start_pt <= 3))
+        tx_start = tx_start_pt;
+
+    /* find the PCI devices */
+    pci_module_init(&pcnet32_driver);
+
+    /* should we find any remaining VLbus devices ? */
+    if (pcnet32vlb)
+        pcnet32_probe_vlbus();
+
+    if (cards_found) {
+        printk(KERN_INFO "%s", version);
+        printk(KERN_INFO PFX "%d cards_found.\n", cards_found);
+    }
+
+    return cards_found ? 0 : -ENODEV;
+}
+
+/* Module unload: unregister and free every device on the pcnet32_dev
+ * list, then unregister the PCI driver.
+ * Fix: pci_unregister_driver() must be called at most once per driver;
+ * the old code invoked it once per PCI card inside the loop. */
+static void __exit pcnet32_cleanup_module(void)
+{
+    struct net_device *next_dev;
+    int have_pci = 0;
+
+    /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+    while (pcnet32_dev) {
+        struct pcnet32_private *lp = pcnet32_dev->priv;
+        next_dev = lp->next;
+        unregister_netdev(pcnet32_dev);
+        release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
+        if (lp->pci_dev)
+            have_pci = 1;
+        pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
+        kfree(pcnet32_dev);
+        pcnet32_dev = next_dev;
+    }
+
+    if (have_pci)
+        pci_unregister_driver(&pcnet32_driver);
+}
+
+/* Standard module entry/exit registration. */
+module_init(pcnet32_init_module);
+module_exit(pcnet32_cleanup_module);
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c pcnet32.c"
+ * c-indent-level: 4
+ * tab-width: 8
+ * End:
+ */
--- /dev/null
+/*
+ drivers/net/tulip/21142.c
+
+ Maintained by Jeff Garzik <jgarzik@pobox.com>
+ Copyright 2000,2001 The Linux Kernel Team
+ Written/copyright 1994-2001 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Please refer to Documentation/DocBook/tulip.{pdf,ps,html}
+ for more information on this driver, or visit the project
+ Web page at http://sourceforge.net/projects/tulip/
+
+*/
+
+#include "tulip.h"
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+
+/* Per-medium 21142/21143 SIA setup values, indexed by dev->if_port.
+ * Presumably index 0 = 10baseT and 3 = 100baseTx (matching the uses in
+ * t21142_timer) — TODO confirm the full port mapping against the 21143
+ * data book.  t21142_csr14 is non-static: shared with other tulip
+ * files. */
+static u16 t21142_csr13[] = { 0x0001, 0x0009, 0x0009, 0x0000, 0x0001, };
+u16 t21142_csr14[] = { 0xFFFF, 0x0705, 0x0705, 0x0000, 0x7F3D, };
+static u16 t21142_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
+
+
+/* Handle the 21143 uniquely: do autoselect with NWay, not the EEPROM list
+ of available transceivers. */
+/* Periodic media timer for the 21143: leaves a working MII or
+ * NWay-negotiated link alone, falls back from a dead 100baseTx link to
+ * NWay, and searches 10/100 media when negotiation failed (csr12
+ * status != 0x5000).  Re-arms itself via mod_timer. */
+void t21142_timer(unsigned long data)
+{
+    struct net_device *dev = (struct net_device *)data;
+    struct tulip_private *tp = (struct tulip_private *)dev->priv;
+    long ioaddr = dev->base_addr;
+    int csr12 = inl(ioaddr + CSR12);
+    int next_tick = 60*HZ;
+    int new_csr6 = 0;
+
+    if (tulip_debug > 2)
+        printk(KERN_INFO"%s: 21143 negotiation status %8.8x, %s.\n",
+               dev->name, csr12, medianame[dev->if_port]);
+    if (tulip_media_cap[dev->if_port] & MediaIsMII) {
+        /* External MII transceiver: just track duplex. */
+        tulip_check_duplex(dev);
+        next_tick = 60*HZ;
+    } else if (tp->nwayset) {
+        /* Don't screw up a negotiated session! */
+        if (tulip_debug > 1)
+            printk(KERN_INFO"%s: Using NWay-set %s media, csr12 %8.8x.\n",
+                   dev->name, medianame[dev->if_port], csr12);
+    } else if (tp->medialock) {
+        /* User forced a medium: nothing to do. */
+        ;
+    } else if (dev->if_port == 3) {
+        if (csr12 & 2) {  /* No 100mbps link beat, revert to 10mbps. */
+            if (tulip_debug > 1)
+                printk(KERN_INFO"%s: No 21143 100baseTx link beat, %8.8x, "
+                       "trying NWay.\n", dev->name, csr12);
+            t21142_start_nway(dev);
+            next_tick = 3*HZ;
+        }
+    } else if ((csr12 & 0x7000) != 0x5000) {
+        /* Negotiation failed. Search media types. */
+        if (tulip_debug > 1)
+            printk(KERN_INFO"%s: 21143 negotiation failed, status %8.8x.\n",
+                   dev->name, csr12);
+        if (!(csr12 & 4)) {             /* 10mbps link beat good. */
+            new_csr6 = 0x82420000;
+            dev->if_port = 0;
+            outl(0, ioaddr + CSR13);
+            outl(0x0003FFFF, ioaddr + CSR14);
+            outw(t21142_csr15[dev->if_port], ioaddr + CSR15);
+            outl(t21142_csr13[dev->if_port], ioaddr + CSR13);
+        } else {
+            /* Select 100mbps port to check for link beat. */
+            new_csr6 = 0x83860000;
+            dev->if_port = 3;
+            outl(0, ioaddr + CSR13);
+            outl(0x0003FF7F, ioaddr + CSR14);
+            outw(8, ioaddr + CSR15);
+            outl(1, ioaddr + CSR13);
+        }
+        if (tulip_debug > 1)
+            printk(KERN_INFO"%s: Testing new 21143 media %s.\n",
+                   dev->name, medianame[dev->if_port]);
+        if (new_csr6 != (tp->csr6 & ~0x00D5)) {
+            tp->csr6 &= 0x00D5;
+            tp->csr6 |= new_csr6;
+            outl(0x0301, ioaddr + CSR12);
+            tulip_restart_rxtx(tp);
+        }
+        next_tick = 3*HZ;
+    }
+
+    /* mod_timer synchronizes us with potential add_timer calls
+     * from interrupts.
+     */
+    mod_timer(&tp->timer, RUN_AT(next_tick));
+}
+
+
+/* Start (or restart) NWay autonegotiation on the 21143: program
+ * CSR13/CSR14 from the advertised abilities (tp->sym_advertise), set
+ * csr6 for 10Mb with full duplex if advertised, apply any media-table
+ * CSR15 setup, and trigger NWAY through CSR12. */
+void t21142_start_nway(struct net_device *dev)
+{
+    struct tulip_private *tp = (struct tulip_private *)dev->priv;
+    long ioaddr = dev->base_addr;
+    int csr14 = ((tp->sym_advertise & 0x0780) << 9) |
+                ((tp->sym_advertise & 0x0020) << 1) | 0xffbf;
+
+    dev->if_port = 0;
+    tp->nway = tp->mediasense = 1;
+    tp->nwayset = tp->lpar = 0;
+    if (tulip_debug > 1)
+        printk(KERN_DEBUG "%s: Restarting 21143 autonegotiation, csr14=%8.8x.\n",
+               dev->name, csr14);
+    outl(0x0001, ioaddr + CSR13);
+    udelay(100);
+    outl(csr14, ioaddr + CSR14);
+    tp->csr6 = 0x82420000 | (tp->sym_advertise & 0x0040 ? FullDuplex : 0);
+    outl(tp->csr6, ioaddr + CSR6);
+    if (tp->mtable && tp->mtable->csr15dir) {
+        outl(tp->mtable->csr15dir, ioaddr + CSR15);
+        outl(tp->mtable->csr15val, ioaddr + CSR15);
+    } else
+        outw(0x0008, ioaddr + CSR15);
+    outl(0x1301, ioaddr + CSR12); /* Trigger NWAY. */
+}
+
+
+
+/* CSR5 link-change interrupt service for the 21143: when NWay has just
+ * completed (csr12 status 0x5000), record the partner abilities and
+ * select the negotiated medium; restart NWay when an established link
+ * drops; otherwise log link-beat transitions and fall back to direct
+ * media sensing. */
+void t21142_lnk_change(struct net_device *dev, int csr5)
+{
+    struct tulip_private *tp = (struct tulip_private *)dev->priv;
+    long ioaddr = dev->base_addr;
+    int csr12 = inl(ioaddr + CSR12);
+
+    if (tulip_debug > 1)
+        printk(KERN_INFO"%s: 21143 link status interrupt %8.8x, CSR5 %x, "
+               "%8.8x.\n", dev->name, csr12, csr5, inl(ioaddr + CSR14));
+
+    /* If NWay finished and we have a negotiated partner capability. */
+    if (tp->nway && !tp->nwayset && (csr12 & 0x7000) == 0x5000) {
+        int setup_done = 0;
+        int negotiated = tp->sym_advertise & (csr12 >> 16);
+        tp->lpar = csr12 >> 16;
+        tp->nwayset = 1;
+        /* Pick the best common medium, highest first. */
+        if (negotiated & 0x0100) dev->if_port = 5;
+        else if (negotiated & 0x0080) dev->if_port = 3;
+        else if (negotiated & 0x0040) dev->if_port = 4;
+        else if (negotiated & 0x0020) dev->if_port = 0;
+        else {
+            tp->nwayset = 0;
+            if ((csr12 & 2) == 0 && (tp->sym_advertise & 0x0180))
+                dev->if_port = 3;
+        }
+        tp->full_duplex = (tulip_media_cap[dev->if_port] & MediaAlwaysFD) ? 1:0;
+
+        if (tulip_debug > 1) {
+            if (tp->nwayset)
+                printk(KERN_INFO "%s: Switching to %s based on link "
+                       "negotiation %4.4x & %4.4x = %4.4x.\n",
+                       dev->name, medianame[dev->if_port], tp->sym_advertise,
+                       tp->lpar, negotiated);
+            else
+                printk(KERN_INFO "%s: Autonegotiation failed, using %s,"
+                       " link beat status %4.4x.\n",
+                       dev->name, medianame[dev->if_port], csr12);
+        }
+
+        if (tp->mtable) {
+            int i;
+            for (i = 0; i < tp->mtable->leafcount; i++)
+                if (tp->mtable->mleaf[i].media == dev->if_port) {
+                    /* NOTE(review): rev 65 of the DC21143 skips the
+                     * full startup sequence here — presumably a chip
+                     * erratum workaround. */
+                    int startup = ! ((tp->chip_id == DC21143 && tp->revision == 65));
+                    tp->cur_index = i;
+                    tulip_select_media(dev, startup);
+                    setup_done = 1;
+                    break;
+                }
+        }
+        if ( ! setup_done) {
+            tp->csr6 = (dev->if_port & 1 ? 0x838E0000 : 0x82420000) | (tp->csr6 & 0x20ff);
+            if (tp->full_duplex)
+                tp->csr6 |= 0x0200;
+            outl(1, ioaddr + CSR13);
+        }
+#if 0 /* Restart shouldn't be needed. */
+        outl(tp->csr6 | RxOn, ioaddr + CSR6);
+        if (tulip_debug > 2)
+            printk(KERN_DEBUG "%s: Restarting Tx and Rx, CSR5 is %8.8x.\n",
+                   dev->name, inl(ioaddr + CSR5));
+#endif
+        tulip_start_rxtx(tp);
+        if (tulip_debug > 2)
+            printk(KERN_DEBUG "%s: Setting CSR6 %8.8x/%x CSR12 %8.8x.\n",
+                   dev->name, tp->csr6, inl(ioaddr + CSR6),
+                   inl(ioaddr + CSR12));
+    } else if ((tp->nwayset && (csr5 & 0x08000000)
+                && (dev->if_port == 3 || dev->if_port == 5)
+                && (csr12 & 2) == 2) ||
+               (tp->nway && (csr5 & (TPLnkFail)))) {
+        /* Link blew? Maybe restart NWay. */
+        del_timer_sync(&tp->timer);
+        t21142_start_nway(dev);
+        tp->timer.expires = RUN_AT(3*HZ);
+        add_timer(&tp->timer);
+    } else if (dev->if_port == 3 || dev->if_port == 5) {
+        if (tulip_debug > 1)
+            printk(KERN_INFO"%s: 21143 %s link beat %s.\n",
+                   dev->name, medianame[dev->if_port],
+                   (csr12 & 2) ? "failed" : "good");
+        if ((csr12 & 2) && ! tp->medialock) {
+            del_timer_sync(&tp->timer);
+            t21142_start_nway(dev);
+            tp->timer.expires = RUN_AT(3*HZ);
+            add_timer(&tp->timer);
+        } else if (dev->if_port == 5)
+            outl(inl(ioaddr + CSR14) & ~0x080, ioaddr + CSR14);
+    } else if (dev->if_port == 0 || dev->if_port == 4) {
+        if ((csr12 & 4) == 0)
+            printk(KERN_INFO"%s: 21143 10baseT link beat good.\n",
+                   dev->name);
+    } else if (!(csr12 & 4)) {          /* 10mbps link beat good. */
+        if (tulip_debug)
+            printk(KERN_INFO"%s: 21143 10mbps sensed media.\n",
+                   dev->name);
+        dev->if_port = 0;
+    } else if (tp->nwayset) {
+        if (tulip_debug)
+            printk(KERN_INFO"%s: 21143 using NWay-set %s, csr6 %8.8x.\n",
+                   dev->name, medianame[dev->if_port], tp->csr6);
+    } else {                            /* 100mbps link beat good. */
+        if (tulip_debug)
+            printk(KERN_INFO"%s: 21143 100baseTx sensed media.\n",
+                   dev->name);
+        dev->if_port = 3;
+        tp->csr6 = 0x838E0000 | (tp->csr6 & 0x20ff);
+        outl(0x0003FF7F, ioaddr + CSR14);
+        outl(0x0301, ioaddr + CSR12);
+        tulip_restart_rxtx(tp);
+    }
+}
+
+
--- /dev/null
+2002-09-18 Ryan Bradetich <rbradetich@uswest.net>
+
+ tulip hppa support:
+ * eeprom.c (tulip_build_fake_mediatable): new function
+ (tulip_parse_eeprom): call it, when no media table
+ * interrupt.c (phy_interrupt): new function
+ (tulip_interrupt): call it, before checking for no-irq-work
+ * tulip.c: add HAS_PHY_IRQ chip feature flag.
+ add csr12_shadow to tulip_private struct, only for hppa currently.
+ * tulip_core (tulip_init_one): support hppa wonky eeproms
+
+2002-05-11 Juan Quintela <quintela@mandrakesoft.com>
+
+ * 21142.c (t21142_lnk_change): Revert earlier patch
+ to always reset phy; only conditionally do so now.
+
+2002-05-03 Jeff Garzik <jgarzik@pobox.com>
+
+ * tulip_core (tulip_pci_tbl): Add new "comet"
+ pci id. Contributed by Ohta Kyuma.
+
+2002-03-07 Jeff Garzik <jgarzik@pobox.com>
+
+ * tulip_core (tulip_mwi_config): Use new PCI API functions
+    for enabling and disabling Memory-Write-Invalidate
+ PCI transaction.
+ Fix bugs in tulip MWI config also.
+
+2002-02-07 Uwe Bonnes <bon@elektron.ikp.physik.tu-darmstadt.de>
+
+ * tulip_core (tulip_pci_tbl[]):
+ Add PCI id for comet tulip clone.
+
+2001-12-19 John Zielinski
+
+ * tulip_core.c (tulip_up, tulip_init_one):
+ More places to revert PHY autoconfiguration bit removal.
+
+2001-12-16 Andrew Lambeth <wal@vmware.com>
+
+ * tulip_core.c (tulip_start_xmit): Use the more-portable
+ spin_lock_irqsave.
+
+2001-11-13 David S. Miller <davem@redhat.com>
+
+ * tulip_core.c (tulip_mwi_config): Kill unused label early_out.
+
+2001-11-06 Richard Mortimer <richm@oldelvet.netscapeonline.co.uk>
+
+ * tulip_core.c: Correct set of values to mask out of csr0,
+ for DM9102A chips. Limit burst/alignment of DM9102A chips
+ on Sparcs.
+
+2001-11-06 Jun Sun <jsun@mvista.com>
+
+ * tulip_core.c: Support finding MAC address on
+ two MIPS boards, DDB5476 and DDB5477.
+
+2001-11-06 Kevin B. Hendricks <khendricks@ivey.uwo.ca>
+
+ * Makefile, tulip.h, tulip_core.c, pnic2.c, 21142.c:
+ Fixes for PNIC II support.
+
+2001-11-06 David S. Miller <davem@redhat.com>
+
+ * tulip_core.c: Support reading MAC address from
+ Sparc OBP property local-mac-address.
+
+2001-07-17 Erik A. Hendriks <hendriks@lanl.gov>
+
+ * 21142.c: Merge fix from tulip.c 0.92w which prevents the
+ overwriting of csr6 bits we want to preserve.
+
+2001-07-10 Jeff Golds <jgolds@resilience.com>
+
+ * tulip_core.c: Fix two comments
+
+2001-07-06 Stephen Degler <sdegler@degler.net>
+
+ * media.c:
+ The media selection process at the end of NWAY is busted
+ because for the case of MII/SYM it needs to be:
+
+ csr13 <- 0
+ csr14 <- 0
+ csr6 <- the value calculated is okay.
+
+ In the other media cases csr14 is computed by
+ t21142_csr14val[dev->if_port], which seems ok. The value of
+ zero as opposed to 3FFFFF comes straight from appendix D of the
+ 21143 data book, and it makes logical sense because you're
+    bypassing all the SIA interface when you use MII or SYM (see
+    figure 1-1 in the data book if you're visually oriented)
+
+2001-07-03 Jeff Golds <jgolds@resilience.com>
+
+ * tulip_core.c (tulip_clean_tx_ring):
+ Clear status for in-progress Tx's, and count
+ Tx errors for all packets being released.
+
+2001-06-16 Jeff Garzik <jgarzik@pobox.com>
+
+ * tulip.h, tulip_core.c:
+ Integrate MMIO support from devel branch, but default
+ it to off for stable kernel and driver series.
+
+2001-06-16 Jeff Garzik <jgarzik@pobox.com>
+
+ * tulip_core.c (tulip_init_one):
+ Free descriptor rings on error.
+
+2001-06-16 Jeff Garzik <jgarzik@pobox.com>
+
+ * tulip_core.c (tulip_mwi_config, tulip_init_one):
+ Large update to csr0 bus configuration code. This is not stable
+ yet, so it is only conditionally enabled, via CONFIG_TULIP_MWI.
+
+2001-06-16 Jeff Garzik <jgarzik@pobox.com>
+
+ * tulip_core.c:
+ Initialize timer in tulip_init_one and tulip_down,
+ not in tulip_up.
+
+2001-06-14 Jeff Garzik <jgarzik@pobox.com>
+
+ * tulip_core.c:
+ - Update tulip_suspend, tulip_resume for new PCI PM API.
+ - Surround suspend/resume code with CONFIG_PM.
+
+2001-06-12 Jeff Golds <jgolds@resilience.com>
+
+ * tulip_core.c:
+ - Reset sw ring ptrs in tulip_up. Fixes PM resume case.
+ - Clean rx and tx rings on device down.
+
+2001-06-05 David Miller <davem@redhat.com>
+
+ * tulip_core (set_rx_mode): Do not use set_bit
+ on an integer variable. Also fix endianness issue.
+
+2001-06-04 Jeff Garzik <jgarzik@pobox.com>
+
+ * interrupt.c:
+ Simplify rx processing when CONFIG_NET_HW_FLOWCONTROL is
+ active, and in the process fix a bug where flow control
+ and low load caused rx not to be acknowledged properly.
+
+2001-06-01 Jeff Garzik <jgarzik@pobox.com>
+
+ * tulip.h:
+ - Remove tulip_outl_csr helper, redundant.
+ - Add tulip_start_rxtx inline helper.
+ - tulip_stop_rxtx helper: Add synchronization. Always use current
+ csr6 value, instead of tp->csr6 value or value passed as arg.
+ - tulip_restart_rxtx helper: Add synchronization. Always
+ use tp->csr6 for desired mode, not value passed as arg.
+ - New RxOn, TxOn, RxTx constants for csr6 modes.
+ - Remove now-redundant constants csr6_st, csr6_sr.
+
+ * 21142.c, interrupt.c, media.c, pnic.c, tulip_core.c:
+ Update for above rxtx helper changes.
+
+ * interrupt.c:
+ - whitespace cleanup around #ifdef CONFIG_NET_HW_FLOWCONTROL,
+ convert tabs to spaces.
+ - Move tp->stats.rx_missed_errors update outside the ifdef.
+
+2001-05-18 Jeff Garzik <jgarzik@pobox.com>
+
+ * tulip_core.c: Added ethtool support.
+ ETHTOOL_GDRVINFO ioctl only, for now.
+
+2001-05-14 Robert Olsson <Robert.Olsson@data.slu.se>
+
+ * Restored HW_FLOWCONTROL from Linux 2.1 series tulip (ANK)
+ plus Jamal's NETIF_RX_* feedback control.
+
+2001-05-14 Robert Olsson <Robert.Olsson@data.slu.se>
+
+ * Added support for 21143's Interrupt Mitigation.
+ Jamal original instigator.
+
+2001-05-14 Robert Olsson <Robert.Olsson@data.slu.se>
+
+ * tulip_refill_rx prototype added to tulip.h
+
+2001-05-13 Jeff Garzik <jgarzik@pobox.com>
+
+ * tulip_core.c: Remove HAS_PCI_MWI flag from Comet, untested.
+
+2001-05-12 Jeff Garzik <jgarzik@pobox.com>
+
+ * tulip_core.c, tulip.h: Remove Conexant PCI id, no chip
+ docs are available to fix problems with support.
+
+2001-05-12 Jeff Garzik <jgarzik@pobox.com>
+
+ * tulip_core.c (tulip_init_one): Do not call
+ unregister_netdev in error cleanup. Remnant of old
+ usage of init_etherdev.
+
+2001-05-12 Jeff Garzik <jgarzik@pobox.com>
+
+ * media.c (tulip_find_mii): Simply write the updated BMCR
+ twice, as it seems the best thing to do for both broken and
+ sane chips.
+ If the mii_advert value, as read from MII_ADVERTISE, is zero,
+ then generate a value we should advertise from the capability
+ bits in BMSR.
+ Fill in tp->advertising for all cases.
+ Just to be safe, clear all unwanted bits.
+
+2001-05-12 Jeff Garzik <jgarzik@pobox.com>
+
+ * tulip_core.c (private_ioctl): Fill in tp->advertising
+ when advertising value is changed by the user.
+
+2001-05-12 Jeff Garzik <jgarzik@pobox.com>
+
+ * tulip_core.c: Mark Comet chips as needing the updated MWI
+ csr0 configuration.
+
+2001-05-12 Jeff Garzik <jgarzik@pobox.com>
+
+ * media.c, tulip_core.c: Move MII scan from inline
+ code inside tulip_init_one to new function
+ tulip_find_mii in media.c.
+
+2001-05-12 Jeff Garzik <jgarzik@pobox.com>
+
+ * media.c (tulip_check_duplex):
+ Only restart Rx/Tx engines if they are active
+ (and csr6 changes)
+
+2001-05-12 Jeff Garzik <jgarzik@pobox.com>
+
+ * tulip_core.c (tulip_mwi_config):
+ Clamp values read from PCI cache line size register to
+ values acceptable to tulip chip. Done for safety and
+ -almost- certainly unneeded.
+
+2001-05-11 Jeff Garzik <jgarzik@pobox.com>
+
+ * tulip_core.c (tulip_init_one):
+ Instead of unconditionally enabling autonegotiation, disable
+ autonegotiation if not using the default port. Further,
+ flip the nway bit immediately, and then update the
+ speed/duplex in a separate MII transaction. We do this
+ because some boards require that nway be disabled separately,
+ before media selection is forced.
+
+ TODO: Investigate if we can simply write the same value
+ to BMCR twice, to avoid unnecessarily changing
+ phy settings.
+
+2001-05-11 Jeff Garzik <jgarzik@pobox.com>
+
+ * tulip.h, tulip_core.c: If HAS_PCI_MWI is set for a
+ given chip, adjust the csr0 values not according to
+ provided values but according to system cache line size.
+ Currently cache alignment is matched as closely to cache
+ line size as possible. Currently programmable burst limit
+ is set (ie. never unlimited), and always equal to cache
+ alignment and system cache size. Currently MWI bit is set
+ only if the MWI bit is present in the PCI command register.
+
+2001-05-11 Jeff Garzik <jgarzik@pobox.com>
+
+ * media.c (tulip_select_media):
+ For media types 1 and 3, only use the provided eeprom
+ advertising value if it is non-zero.
+ (tulip_check_duplex):
+ Do not exit ASAP if full_duplex_lock is set. This
+ ensures that the csr6 value is written if an update
+ is needed.
+
+2001-05-10 Jeff Garzik <jgarzik@pobox.com>
+
+ Merge PNIC-II-specific stuff from Becker's tulip.c:
+
+ * tulip.h, 21142.c (pnic2_lnk_change): new function
+ * tulip_core.c (tulip_init_one): use it
+
+ * tulip_core.c (tulip_tx_timeout): Add specific
+ debugging for PNIC2.
+
+2001-05-10 Jeff Garzik <jgarzik@pobox.com>
+
+ * tulip_core.c (tulip_init_one): Print out
+ tulip%d instead of PCI device number, for
+ consistency.
+
+2001-05-10 Jeff Garzik <jgarzik@pobox.com>
+
+ * Merge changes from Becker's tulip.c:
+ Fix bugs in ioctl.
+ Fix several bugs by distinguishing between MII
+ and SYM advertising values.
+ Set CSR14 autonegotiation bit for media types 2 and 4,
+ where the SIA CSR setup values are not provided.
+
+2001-05-10 Jeff Garzik <jgarzik@pobox.com>
+
+ * media.c (tulip_select_media): Only update MII
+ advertising value if startup arg < 2.
+
+ * tulip.h: Do not enable CSR13/14/15 autoconfiguration
+ for 21041.
+
+ * tulip_core.c:
+ 21041: add specific code for reset, and do not set CAC bit
+ When resetting media, for media table type 11 media, pass
+ value 2 as 'startup' arg to select_media, to avoid updating
+ MII advertising value.
+
+2001-05-10 Jeff Garzik <jgarzik@pobox.com>
+
+ * pnic.c (pnic_check_duplex): remove
+ pnic.c (pnic_lnk_change, pnic_timer): use
+ tulip_check_duplex not pnic_check_duplex.
+
+ * media.c (tulip_check_duplex):
+ Clean up to use symbolic names instead of numeric constants.
+ Set TxThreshold mode as necessary as well as clearing it.
+ Update csr6 if csr6 changes, not simply if duplex changes.
+
+ (found by Manfred Spraul)
+
+2001-05-10 Jeff Garzik <jgarzik@pobox.com>
+
+ * 21142.c, eeprom.c, tulip.h, tulip_core.c:
+ Remove DPRINTK as another, better method of
+ debug message printing is available.
+
+2001-05-09 Jeff Garzik <jgarzik@pobox.com>
+
+ * 21142.c (t21142_lnk_change): Pass arg startup==1
+ to tulip_select_media, in order to force csr13 to be
+ zeroed out prior to going to full duplex mode. Fixes
+ autonegotiation on a quad-port Znyx card.
+ (from Stephen Dengler)
+
+2001-05-09 Russell King <rmk@arm.linux.org.uk>
+
+ * interrupt.c: Better PCI bus error reporting.
+
+2001-04-03 Jeff Garzik <jgarzik@pobox.com>
+
+ * tulip_core.c: Now that dev->name is only available late
+ in the probe, insert a hack to replace a not-evaluated
+ "eth%d" string with an evaluated "tulip%d" string.
+ Also, remove obvious comment and an indentation cleanup.
+
+2001-04-03 Jeff Garzik <jgarzik@pobox.com>
+
+ * tulip_core.c: If we are a module, always print out the
+ version string. If we are built into the kernel, only print
+ the version string if at least one tulip is detected.
+
+2001-04-03 Jeff Garzik <jgarzik@pobox.com>
+
+ Merged from Becker's tulip.c 0.92t:
+
+ * tulip_core.c: Add support for Conexant LANfinity.
+
+2001-04-03 Jeff Garzik <jgarzik@pobox.com>
+
+ * tulip_core.c: Only suspend/resume if the interface
+ is up and running. Use alloc_etherdev and pci_request_regions.
+ Spelling fix.
+
+2001-04-03 Jeff Garzik <jgarzik@pobox.com>
+
+ * tulip_core.c: Remove code that existed when one or more of
+ the following defines existed. These defines were never used
+ by normal users in practice: TULIP_FULL_DUPLEX,
+ TULIP_DEFAULT_MEDIA, and TULIP_NO_MEDIA_SWITCH.
+
+ * tulip.h, eeprom.c: Move EE_* constants from tulip.h to eeprom.c.
+ * tulip.h, media.c: Move MDIO_* constants from tulip.h to media.c.
+
+ * media.c: Add barrier() to mdio_read/write's PNIC status check
+ loops.
+
+2001-04-03 Jeff Garzik <jgarzik@pobox.com>
+
+ Merged from Becker's tulip.c 0.92t:
+
+ * tulip.h: Add MEDIA_MASK constant for bounding medianame[]
+ array lookups.
+ * eeprom.c, media.c, timer.c, tulip_core.c: Use it.
+
+ * media.c, tulip_core.c: mdio_{read,write} cleanup. Since this
+ is called [pretty much] directly from ioctl, we mask
+ read/write arguments to limit the values passed.
+ Added mii_lock. Added comet_miireg2offset and better
+ Comet-specific mdio_read/write code. Pay closer attention
+ to the bits we set in ioctl. Remove spinlocks from ioctl,
+ they are in mdio_read/write now. Use mask to limit
+ phy number in tulip_init_one's MII scan.
+
+2001-04-03 Jeff Garzik <jgarzik@pobox.com>
+
+ Merged from Becker's tulip.c 0.92t:
+
+ * 21142.c, tulip_core.c: PNIC2 MAC address and NWay fixes.
+ * tulip.h: Add FullDuplex constant, used in above change.
+
+2001-04-03 Jeff Garzik <jgarzik@pobox.com>
+
+ * timer.c: Do not call netif_carrier_{on,off}, it is not used in
+ the main tree. Leave code in, disabled, as markers for future
+ carrier notification.
+
+2001-04-03 Jeff Garzik <jgarzik@pobox.com>
+
+ Merged from Becker's tulip.c 0.92t, except for the tulip.h
+ whitespace cleanup:
+
+ * interrupt.c: If Rx stops, make sure to update the
+ multicast filter before restarting.
+ * tulip.h: Add COMET_MAC_ADDR feature flag, clean up flags.
+ Add Accept* Rx mode bit constants.
+ Add mc_filter[] to driver private struct.
+ * tulip_core.c: Add new Comet PCI id 0x1113:0x9511.
+ Add COMET_MAC_ADDR feature flag to comet entry in board info array.
+ Prefer to test COMET_MAC_ADDR flag to testing chip_id for COMET,
+ when dealing with the Comet's MAC address.
+ Enable Tx underrun recovery for Comet chips.
+ Use new Accept* constants in set_rx_mode.
+ Prefer COMET_MAC_ADDR flag test to chip_id test in set_rx_mode.
+ Store built mc_filter for later use in intr handler by Comets.
+
+2001-04-03 Jeff Garzik <jgarzik@pobox.com>
+
+ * tulip_core.c: Use tp->cur_tx when building the
+ setup frame, instead of assuming that the setup
+ frame is always built in slot zero. This case is
+ hit during PM resume.
+
+2001-04-03 Jeff Garzik <jgarzik@pobox.com>
+
+ * *.c: Update file headers (copyright, urls, etc.)
+ * Makefile: re-order to that chip-specific modules on own line
+ * eeprom.c: BSS/zero-init cleanup (Andrey Panin)
+ * tulip_core.c: merge medianame[] update from tulip.c.
+ Additional arch-specific rx_copybreak, csr0 values. (various)
+
+2001-02-20 Jeff Garzik <jgarzik@pobox.com>
+
+ * media.c (tulip_select_media): No need to initialize
+ new_csr6, all cases initialize it properly.
+
+2001-02-18 Manfred Spraul <manfred@colorfullife.com>
+
+ * interrupt.c (tulip_refill_rx): Make public.
+ If PNIC chip stops due to lack of Rx buffers, restart it.
+ (tulip_interrupt): PNIC doesn't have a h/w timer, emulate
+ with software timers.
+ * pnic.c (pnic_check_duplex): New function, PNIC-specific
+ version of tulip_check_duplex.
+ (pnic_lnk_change): Call pnic_check_duplex. If we use an
+ external MII, then we mustn't use the internal negotiation.
+ (pnic_timer): Support Rx refilling on work overflow in
+ interrupt handler, as PNIC doesn't support a h/w timer.
+ * tulip_core.c (tulip_tbl[]): Modify default csr6
+
+2001-02-11 Jeff Garzik <jgarzik@pobox.com>
+
+ * tulip_core.c (tulip_init_one): Call pci_enable_device
+ to ensure wakeup/resource assignment before checking those
+ values.
+ (tulip_init_one): Replace PCI ids with constants from pci_id.h.
+ (tulip_suspend, tulip_resume, tulip_remove_one): Call
+ pci_power_on/off (commented out for now).
+
+2001-02-10 Jeff Garzik <jgarzik@pobox.com>
+
+ * tulip.h: Add CFDD_xxx bits for Tulip power management
+ * tulip_core.c (tulip_set_power_state): New function,
+ manipulating Tulip chip power state where supported.
+ (tulip_up, tulip_down, tulip_init_one): Use it.
+
+2001-02-10 Jeff Garzik <jgarzik@pobox.com>
+
+ * tulip_core.c (tulip_tx_timeout): Call netif_wake_queue
+ to ensure the next Tx is always sent to us.
+
+2001-01-27 Jeff Garzik <jgarzik@pobox.com>
+
+ * tulip_core.c (tulip_remove_one): Fix mem leak by freeing
+ tp->media_tbl. Add check for !dev, reformat code appropriately.
+
+2001-01-27 Jeff Garzik <jgarzik@pobox.com>
+
+ * tulip_tbl[]: Comment all entries to make order and chip_id
+ relationship more clear.
+ * tulip_pci_tbl[]: Add new Accton PCI id (COMET chipset).
+
+2001-01-16 Jeff Garzik <jgarzik@pobox.com>
+
+ * tulip_core.c: static vars no longer explicitly
+ initialized to zero.
+ * eeprom.c (tulip_read_eeprom): Make sure to delay between
+ EE_ENB and EE_ENB|EE_SHIFT_CLK. Merged from becker tulip.c.
+
+2001-01-05 Peter De Schrijver <p2@mind.be>
+
+ * eeprom.c (tulip_parse_eeprom): Interpret a bit more of 21142
+ extended format type 3 info blocks in a tulip SROM.
+
+2001-01-03 Matti Aarnio <matti.aarnio@zmailer.org>
+
+ * media.c (tulip_select_media): Support media types 5 and 6
+
+2001-??-?? ??
+
+ * tulip_core.c: Add comment about LanMedia needing
+ a different driver.
+ Enable workarounds for early PCI chipsets.
+ Add IA64 csr0 support, update HPPA csr0 support.
+
+2000-12-17 Alan Cox <alan@redhat.com>
+
+ * eeprom.c, timer.c, tulip.h, tulip_core.c: Merge support
+ for the Davicom's quirks into the main tulip.
+ Patch by Tobias Ringstrom
+
+2000-11-08 Jim Studt <jim@federated.com>
+
+ * eeprom.c (tulip_parse_eeprom): Check array bounds for
+ medianame[] and block_name[] arrays to avoid oops due
+ to bad values returned from hardware.
+
+2000-11-02 Jeff Garzik <jgarzik@pobox.com>
+
+ * tulip_core.c (set_rx_mode): This is synchronized via
+ dev->xmit_lock, so only the queueing of the setup frame needs to
+ be locked, against tulip_interrupt.
+
+2000-11-02 Alexey Kuznetov <kuznet@ms2.inr.ac.ru>
+
+ * timer.c (tulip_timer): Call netif_carrier_{on,off} to report
+ link state to the rest of the kernel, and userspace.
+ * interrupt.c (tulip_interrupt): Remove tx_full.
+ * tulip.h: Likewise.
+ * tulip_core.c (tulip_init_ring, tulip_start_xmit, set_rx_mode):
+ Likewise.
+
+2000-10-18 Jeff Garzik <jgarzik@pobox.com>
+
+ * tulip_core.c: (tulip_init_one) Print out ethernet interface
+ on error. Print out a message when pci_enable_device fails.
+ Handle DMA alloc failure.
+
+2000-10-18 Jeff Garzik <jgarzik@pobox.com>
+
+ * Makefile: New file.
+ * tulip_core.c (tulip_init_one): Correct error messages
+ on PIO/MMIO region reserve failure.
+ (tulip_init_one) Add new check to ensure that PIO region is
+ sufficient for our needs.
+
--- /dev/null
+# Makefile for the tulip network driver: partially links all
+# per-chip objects into a single relocatable tulip.o.
+
+include $(BASEDIR)/Rules.mk
+
+# Incremental (-r) link so the parent Makefile can link tulip.o in turn.
+default: $(OBJS)
+ $(LD) -r -o tulip.o $(OBJS)
+
+clean:
+ rm -f *.o *~ core
--- /dev/null
+/*
+ drivers/net/tulip/eeprom.c
+
+ Maintained by Jeff Garzik <jgarzik@pobox.com>
+ Copyright 2000,2001 The Linux Kernel Team
+ Written/copyright 1994-2001 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Please refer to Documentation/DocBook/tulip.{pdf,ps,html}
+ for more information on this driver, or visit the project
+ Web page at http://sourceforge.net/projects/tulip/
+
+*/
+
+#include "tulip.h"
+#include <linux/init.h>
+#include <asm/unaligned.h>
+
+
+
+/* Serial EEPROM section. */
+/* The main routine to parse the very complicated SROM structure.
+ Search www.digital.com for "21X4 SROM" to get details.
+ This code is very complex, and will require changes to support
+ additional cards, so I'll be verbose about what is going on.
+ */
+
+/* Known cards that have old-style EEPROMs. */
+/* Each entry is matched against the first three station-address bytes
+ (vendor prefix); on a match, tulip_parse_eeprom() copies newtable over
+ the EEPROM image at offset 26 as substitute media-control info. */
+static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
+ {"Asante", 0, 0, 0x94, {0x1e00, 0x0000, 0x0800, 0x0100, 0x018c,
+ 0x0000, 0x0000, 0xe078, 0x0001, 0x0050, 0x0018 }},
+ {"SMC9332DST", 0, 0, 0xC0, { 0x1e00, 0x0000, 0x0800, 0x041f,
+ 0x0000, 0x009E, /* 10baseT */
+ 0x0004, 0x009E, /* 10baseT-FD */
+ 0x0903, 0x006D, /* 100baseTx */
+ 0x0905, 0x006D, /* 100baseTx-FD */ }},
+ {"Cogent EM100", 0, 0, 0x92, { 0x1e00, 0x0000, 0x0800, 0x063f,
+ 0x0107, 0x8021, /* 100baseFx */
+ 0x0108, 0x8021, /* 100baseFx-FD */
+ 0x0100, 0x009E, /* 10baseT */
+ 0x0104, 0x009E, /* 10baseT-FD */
+ 0x0103, 0x006D, /* 100baseTx */
+ 0x0105, 0x006D, /* 100baseTx-FD */ }},
+ {"Maxtech NX-110", 0, 0, 0xE8, { 0x1e00, 0x0000, 0x0800, 0x0513,
+ 0x1001, 0x009E, /* 10base2, CSR12 0x10*/
+ 0x0000, 0x009E, /* 10baseT */
+ 0x0004, 0x009E, /* 10baseT-FD */
+ 0x0303, 0x006D, /* 100baseTx, CSR12 0x03 */
+ 0x0305, 0x006D, /* 100baseTx-FD CSR12 0x03 */}},
+ {"Accton EN1207", 0, 0, 0xE8, { 0x1e00, 0x0000, 0x0800, 0x051F,
+ 0x1B01, 0x0000, /* 10base2, CSR12 0x1B */
+ 0x0B00, 0x009E, /* 10baseT, CSR12 0x0B */
+ 0x0B04, 0x009E, /* 10baseT-FD,CSR12 0x0B */
+ 0x1B03, 0x006D, /* 100baseTx, CSR12 0x1B */
+ 0x1B05, 0x006D, /* 100baseTx-FD CSR12 0x1B */
+ }},
+ {"NetWinder", 0x00, 0x10, 0x57,
+ /* Default media = MII
+ * MII block, reset sequence (3) = 0x0821 0x0000 0x0001, capabilities 0x01e1
+ */
+ { 0x1e00, 0x0000, 0x000b, 0x8f01, 0x0103, 0x0300, 0x0821, 0x000, 0x0001, 0x0000, 0x01e1 }
+ },
+ {0, 0, 0, 0, {}}}; /* terminator: NULL name ends the fixup scan */
+
+
+/* Human-readable names for SROM media-info block types, indexed by
+ leaf->type when printing the parsed EEPROM. The caller bounds-checks
+ the index against ARRAY_SIZE(block_name) before use. */
+static const char *block_name[] __devinitdata = {
+ "21140 non-MII",
+ "21140 MII PHY",
+ "21142 Serial PHY",
+ "21142 MII PHY",
+ "21143 SYM PHY",
+ "21143 reset method"
+};
+
+
+/**
+ * tulip_build_fake_mediatable - Build a fake mediatable entry.
+ * @tp: Ptr to the tulip private data.
+ *
+ * Some cards like the 3x5 HSC cards (J3514A) do not have a standard
+ * srom and can not be handled under the fixup routine. These cards
+ * still need a valid mediatable entry for correct csr12 setup and
+ * mii handling.
+ *
+ * Since this is currently a parisc-linux specific function, the
+ * #ifdef __hppa__ should completely optimize this function away for
+ * non-parisc hardware.
+ */
+static void __devinit tulip_build_fake_mediatable(struct tulip_private *tp)
+{
+#ifdef __hppa__
+ unsigned char *ee_data = tp->eeprom;
+
+ /* Match the 3x5/HSC cards by their first four EEPROM bytes.
+ NOTE(review): values taken as-is from existing code; presumably an
+ HP-specific ID -- confirm against hardware docs. */
+ if (ee_data[0] == 0x3c && ee_data[1] == 0x10 &&
+ (ee_data[2] == 0x63 || ee_data[2] == 0x61) && ee_data[3] == 0x10) {
+
+ /* A canned media leaf in the layout tulip_parse_eeprom() produces
+ for MII (type 1) blocks; consumed later via mleaf[0].leafdata. */
+ static unsigned char leafdata[] =
+ { 0x01, /* phy number */
+ 0x02, /* gpr setup sequence length */
+ 0x02, 0x00, /* gpr setup sequence */
+ 0x02, /* phy reset sequence length */
+ 0x01, 0x00, /* phy reset sequence */
+ 0x00, 0x78, /* media capabilities */
+ 0x00, 0xe0, /* nway advertisement */
+ 0x00, 0x05, /* fdx bit map */
+ 0x00, 0x06 /* ttm bit map */
+ };
+
+ /* One mediatable with a single trailing medialeaf. */
+ tp->mtable = (struct mediatable *)
+ kmalloc(sizeof(struct mediatable) + sizeof(struct medialeaf), GFP_KERNEL);
+
+ if (tp->mtable == NULL)
+ return; /* Horrible, impossible failure. */
+
+ tp->mtable->defaultmedia = 0x800; /* bit 0x0800 = Autosense */
+ tp->mtable->leafcount = 1;
+ tp->mtable->csr12dir = 0x3f; /* inputs on bit7 for hsc-pci, bit6 for pci-fx */
+ tp->mtable->has_nonmii = 0;
+ tp->mtable->has_reset = 0;
+ tp->mtable->has_mii = 1;
+ tp->mtable->csr15dir = tp->mtable->csr15val = 0;
+ tp->mtable->mleaf[0].type = 1; /* "21140 MII PHY" block type */
+ tp->mtable->mleaf[0].media = 11; /* MII media code */
+ tp->mtable->mleaf[0].leafdata = &leafdata[0];
+ tp->flags |= HAS_PHY_IRQ;
+ tp->csr12_shadow = -1;
+ }
+#endif
+}
+
+/* Parse the board's SROM/EEPROM image (tp->eeprom) into tp->mtable.
+ Handles three layouts: old-style (station address only, patched via
+ eeprom_fixups), the 21041 media table, and the 21140/21142+ extended
+ media table. The static locals persist between probe calls so the
+ secondary ports of a multiport board can reuse the first port's table. */
+void __devinit tulip_parse_eeprom(struct net_device *dev)
+{
+ /* The last media info list parsed, for multiport boards. */
+ static struct mediatable *last_mediatable;
+ static unsigned char *last_ee_data;
+ static int controller_index;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ unsigned char *ee_data = tp->eeprom;
+ int i;
+
+ tp->mtable = 0;
+ /* Detect an old-style (SA only) EEPROM layout:
+ memcmp(eedata, eedata+16, 8). */
+ for (i = 0; i < 8; i ++)
+ if (ee_data[i] != ee_data[16+i])
+ break;
+ if (i >= 8) { /* first 8 bytes repeat at offset 16: old-style layout */
+ if (ee_data[0] == 0xff) {
+ /* Blank EEPROM: either a secondary port of a multiport board
+ (reuse the primary's table) or genuinely missing. */
+ if (last_mediatable) {
+ controller_index++;
+ printk(KERN_INFO "%s: Controller %d of multiport board.\n",
+ dev->name, controller_index);
+ tp->mtable = last_mediatable;
+ ee_data = last_ee_data;
+ goto subsequent_board;
+ } else
+ printk(KERN_INFO "%s: Missing EEPROM, this interface may "
+ "not work correctly!\n",
+ dev->name);
+ return;
+ }
+ /* Do a fix-up based on the vendor half of the station address prefix. */
+ for (i = 0; eeprom_fixups[i].name; i++) {
+ if (dev->dev_addr[0] == eeprom_fixups[i].addr0
+ && dev->dev_addr[1] == eeprom_fixups[i].addr1
+ && dev->dev_addr[2] == eeprom_fixups[i].addr2) {
+ if (dev->dev_addr[2] == 0xE8 && ee_data[0x1a] == 0x55)
+ i++; /* An Accton EN1207, not an outlaw Maxtech. */
+ memcpy(ee_data + 26, eeprom_fixups[i].newtable,
+ sizeof(eeprom_fixups[i].newtable));
+ printk(KERN_INFO "%s: Old format EEPROM on '%s' board. Using"
+ " substitute media control info.\n",
+ dev->name, eeprom_fixups[i].name);
+ break;
+ }
+ }
+ if (eeprom_fixups[i].name == NULL) { /* No fixup found. */
+ printk(KERN_INFO "%s: Old style EEPROM with no media selection "
+ "information.\n",
+ dev->name);
+ return;
+ }
+ }
+
+ controller_index = 0;
+ if (ee_data[19] > 1) { /* Multiport board. */
+ last_ee_data = ee_data;
+ }
+subsequent_board:
+
+ if (ee_data[27] == 0) { /* No valid media table. */
+ tulip_build_fake_mediatable(tp);
+ } else if (tp->chip_id == DC21041) {
+ /* 21041 layout: per-controller table offset at byte 27, then a
+ 16-bit default media word and a count of media blocks. */
+ unsigned char *p = (void *)ee_data + ee_data[27 + controller_index*3];
+ int media = get_u16(p);
+ int count = p[2];
+ p += 3;
+
+ printk(KERN_INFO "%s: 21041 Media table, default media %4.4x (%s).\n",
+ dev->name, media,
+ media & 0x0800 ? "Autosense" : medianame[media & MEDIA_MASK]);
+ for (i = 0; i < count; i++) {
+ unsigned char media_block = *p++;
+ int media_code = media_block & MEDIA_MASK;
+ if (media_block & 0x40) /* extended entry: skip 6 CSR setup bytes */
+ p += 6;
+ printk(KERN_INFO "%s: 21041 media #%d, %s.\n",
+ dev->name, media_code, medianame[media_code]);
+ }
+ } else {
+ /* Extended-format (21140/21142+) media table. */
+ unsigned char *p = (void *)ee_data + ee_data[27];
+ unsigned char csr12dir = 0;
+ int count, new_advertise = 0;
+ struct mediatable *mtable;
+ u16 media = get_u16(p);
+
+ p += 2;
+ if (tp->flags & CSR12_IN_SROM)
+ csr12dir = *p++;
+ count = *p++;
+
+ /* there is no phy information, don't even try to build mtable */
+ if (count == 0) {
+ if (tulip_debug > 0)
+ printk(KERN_WARNING "%s: no phy info, aborting mtable build\n", dev->name);
+ return;
+ }
+
+ /* mediatable with 'count' trailing medialeaf entries. */
+ mtable = (struct mediatable *)
+ kmalloc(sizeof(struct mediatable) + count*sizeof(struct medialeaf),
+ GFP_KERNEL);
+ if (mtable == NULL)
+ return; /* Horrible, impossible failure. */
+ last_mediatable = tp->mtable = mtable;
+ mtable->defaultmedia = media;
+ mtable->leafcount = count;
+ mtable->csr12dir = csr12dir;
+ mtable->has_nonmii = mtable->has_mii = mtable->has_reset = 0;
+ mtable->csr15dir = mtable->csr15val = 0;
+
+ printk(KERN_INFO "%s: EEPROM default media type %s.\n", dev->name,
+ media & 0x0800 ? "Autosense" : medianame[media & MEDIA_MASK]);
+ for (i = 0; i < count; i++) {
+ struct medialeaf *leaf = &mtable->mleaf[i];
+
+ if ((p[0] & 0x80) == 0) { /* 21140 Compact block. */
+ leaf->type = 0;
+ leaf->media = p[0] & 0x3f;
+ leaf->leafdata = p;
+ if ((p[2] & 0x61) == 0x01) /* Bogus, but Znyx boards do it. */
+ mtable->has_mii = 1;
+ p += 4;
+ } else {
+ leaf->type = p[1];
+ if (p[1] == 0x05) {
+ mtable->has_reset = i;
+ leaf->media = p[2] & 0x0f;
+ } else if (tp->chip_id == DM910X && p[1] == 0x80) {
+ /* Hack to ignore Davicom delay period block */
+ mtable->leafcount--;
+ count--;
+ i--;
+ leaf->leafdata = p + 2;
+ p += (p[0] & 0x3f) + 1;
+ continue;
+ } else if (p[1] & 1) {
+ /* MII block: skip past the gpr and reset sequences to
+ reach the 16-bit advertising word. */
+ int gpr_len, reset_len;
+
+ mtable->has_mii = 1;
+ leaf->media = 11;
+ gpr_len=p[3]*2;
+ reset_len=p[4+gpr_len]*2;
+ new_advertise |= get_u16(&p[7+gpr_len+reset_len]);
+ } else {
+ mtable->has_nonmii = 1;
+ leaf->media = p[2] & MEDIA_MASK;
+ /* Davicom's media number for 100BaseTX is strange */
+ if (tp->chip_id == DM910X && leaf->media == 1)
+ leaf->media = 3;
+ /* Accumulate SYM advertising bits for this media type;
+ stored into tp->sym_advertise below. */
+ switch (leaf->media) {
+ case 0: new_advertise |= 0x0020; break;
+ case 4: new_advertise |= 0x0040; break;
+ case 3: new_advertise |= 0x0080; break;
+ case 5: new_advertise |= 0x0100; break;
+ case 6: new_advertise |= 0x0200; break;
+ }
+ /* Type-2, media-0 blocks may carry CSR15 dir/val words. */
+ if (p[1] == 2 && leaf->media == 0) {
+ if (p[2] & 0x40) {
+ u32 base15 = get_unaligned((u16*)&p[7]);
+ mtable->csr15dir =
+ (get_unaligned((u16*)&p[9])<<16) + base15;
+ mtable->csr15val =
+ (get_unaligned((u16*)&p[11])<<16) + base15;
+ } else {
+ mtable->csr15dir = get_unaligned((u16*)&p[3])<<16;
+ mtable->csr15val = get_unaligned((u16*)&p[5])<<16;
+ }
+ }
+ }
+ leaf->leafdata = p + 2;
+ p += (p[0] & 0x3f) + 1;
+ }
+ if (tulip_debug > 1 && leaf->media == 11) {
+ unsigned char *bp = leaf->leafdata;
+ printk(KERN_INFO "%s: MII interface PHY %d, setup/reset "
+ "sequences %d/%d long, capabilities %2.2x %2.2x.\n",
+ dev->name, bp[0], bp[1], bp[2 + bp[1]*2],
+ bp[5 + bp[2 + bp[1]*2]*2], bp[4 + bp[2 + bp[1]*2]*2]);
+ }
+ printk(KERN_INFO "%s: Index #%d - Media %s (#%d) described "
+ "by a %s (%d) block.\n",
+ dev->name, i, medianame[leaf->media & 15], leaf->media,
+ leaf->type < ARRAY_SIZE(block_name) ? block_name[leaf->type] : "<unknown>",
+ leaf->type);
+ }
+ if (new_advertise)
+ tp->sym_advertise = new_advertise;
+ }
+}
+/* Reading a serial EEPROM is a "bit" grungy, but we work our way through:->.*/
+
+/* EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK 0x02 /* EEPROM shift clock. */
+#define EE_CS 0x01 /* EEPROM chip select. */
+#define EE_DATA_WRITE 0x04 /* Data from the Tulip to EEPROM. */
+#define EE_WRITE_0 0x01
+#define EE_WRITE_1 0x05
+#define EE_DATA_READ 0x08 /* Data from the EEPROM chip. */
+#define EE_ENB (0x4800 | EE_CS)
+
+/* Delay between EEPROM clock transitions.
+ Even at 33Mhz current PCI implementations don't overrun the EEPROM clock.
+ We add a bus turn-around to insure that this remains true. */
+#define eeprom_delay() inl(ee_addr)
+
+/* The EEPROM commands include the always-set leading bit. */
+#define EE_READ_CMD (6)
+
+/* Bit-bang one word out of the serial EEPROM attached to CSR9. */
+/* Note: this routine returns extra data bits for size detection. */
+int __devinit tulip_read_eeprom(long ioaddr, int location, int addr_len)
+{
+ int i;
+ unsigned retval = 0;
+ long ee_addr = ioaddr + CSR9;
+ int read_cmd = location | (EE_READ_CMD << addr_len);
+
+ /* Select the chip: CS low then high, with the clock idle. */
+ outl(EE_ENB & ~EE_CS, ee_addr);
+ outl(EE_ENB, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 4 + addr_len; i >= 0; i--) {
+ short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
+ outl(EE_ENB | dataval, ee_addr);
+ eeprom_delay();
+ outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay();
+ retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
+ }
+ outl(EE_ENB, ee_addr);
+ eeprom_delay();
+
+ /* Clock the 16 data bits in, most significant bit first. */
+ for (i = 16; i > 0; i--) {
+ outl(EE_ENB | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay();
+ retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
+ outl(EE_ENB, ee_addr);
+ eeprom_delay();
+ }
+
+ /* Terminate the EEPROM access. */
+ outl(EE_ENB & ~EE_CS, ee_addr);
+ return retval;
+}
+
--- /dev/null
+/*
+ drivers/net/tulip/interrupt.c
+
+ Maintained by Jeff Garzik <jgarzik@pobox.com>
+ Copyright 2000,2001 The Linux Kernel Team
+ Written/copyright 1994-2001 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Please refer to Documentation/DocBook/tulip.{pdf,ps,html}
+ for more information on this driver, or visit the project
+ Web page at http://sourceforge.net/projects/tulip/
+
+*/
+
+#include "tulip.h"
+#include <linux/config.h>
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+
+
+int tulip_rx_copybreak;
+unsigned int tulip_max_interrupt_work;
+
+#ifdef CONFIG_NET_HW_FLOWCONTROL
+
+#define MIT_SIZE 15
+/* CSR11 values for Rx interrupt mitigation; the index is derived from
+ the netif_rx() congestion feedback (entry 0 disables mitigation). */
+unsigned int mit_table[MIT_SIZE+1] =
+{
+ /* CRS11 21143 hardware Mitigation Control Interrupt
+ We use only RX mitigation; other techniques are used
+ for TX intr. mitigation.
+
+ 31 Cycle Size (timer control)
+ 30:27 TX timer in 16 * Cycle size
+ 26:24 TX No pkts before Int.
+ 23:20 RX timer in Cycle size
+ 19:17 RX No pkts before Int.
+ 16 Continuous Mode (CM)
+ */
+
+ 0x0, /* IM disabled */
+ 0x80150000, /* RX time = 1, RX pkts = 2, CM = 1 */
+ 0x80150000,
+ 0x80270000,
+ 0x80370000,
+ 0x80490000,
+ 0x80590000,
+ 0x80690000,
+ 0x807B0000,
+ 0x808B0000,
+ 0x809D0000,
+ 0x80AD0000,
+ 0x80BD0000,
+ 0x80CF0000,
+ 0x80DF0000,
+// 0x80FF0000 /* RX time = 16, RX pkts = 7, CM = 1 */
+ 0x80F10000 /* RX time = 16, RX pkts = 0, CM = 1 */
+};
+#endif
+
+
+/* Refill empty Rx ring slots with freshly allocated, DMA-mapped skbs and
+ hand their descriptors back to the chip (DescOwned). Returns the number
+ of buffers refilled; stops early (without failing) if dev_alloc_skb runs
+ out of memory. On the PNIC (LC82C168), also restarts the Rx engine if
+ CSR5 shows it stopped for lack of buffers. */
+int tulip_refill_rx(struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ int entry;
+ int refilled = 0;
+
+ /* Refill the Rx ring buffers. */
+ for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
+ entry = tp->dirty_rx % RX_RING_SIZE;
+ if (tp->rx_buffers[entry].skb == NULL) {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+
+ skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
+ if (skb == NULL)
+ break; /* out of memory; a later call will retry */
+
+ mapping = pci_map_single(tp->pdev, skb->tail, PKT_BUF_SZ,
+ PCI_DMA_FROMDEVICE);
+ tp->rx_buffers[entry].mapping = mapping;
+
+ skb->dev = dev; /* Mark as being used by this device. */
+ tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
+ refilled++;
+ }
+ /* Return the descriptor to the chip. */
+ tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
+ }
+ if(tp->chip_id == LC82C168) {
+ if(((inl(dev->base_addr + CSR5)>>17)&0x07) == 4) {
+ /* Rx stopped due to out of buffers,
+ * restart it
+ */
+ outl(0x01, dev->base_addr + CSR2);
+ }
+ }
+ return refilled;
+}
+
+
+/* Drain newly received packets from the Rx descriptor ring and pass them
+ * to the network stack via netif_rx().  Small frames (< tulip_rx_copybreak)
+ * are copied into a fresh skb so the original ring buffer can be reused;
+ * larger frames hand the ring skb itself upstream.  Runs in IRQ context,
+ * called only from tulip_interrupt().  Returns the number of packets
+ * accepted (under CONFIG_NET_HW_FLOWCONTROL it returns RX_RING_SIZE+1
+ * instead, as a "maxrx+1" sentinel for the caller's work accounting). */
+static int tulip_rx(struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ int entry = tp->cur_rx % RX_RING_SIZE;
+ /* Number of ring slots we may consume before catching our own tail. */
+ int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
+ int received = 0;
+
+#ifdef CONFIG_NET_HW_FLOWCONTROL
+ int drop = 0, mit_sel = 0;
+
+/* that one buffer is needed for mit activation; or might be a
+ bug in the ring buffer code; check later -- JHS*/
+
+ if (rx_work_limit >=RX_RING_SIZE) rx_work_limit--;
+#endif
+
+ if (tulip_debug > 4)
+ printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
+ tp->rx_ring[entry].status);
+ /* If we own the next entry, it is a new packet. Send it up. */
+ while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
+ s32 status = le32_to_cpu(tp->rx_ring[entry].status);
+
+ if (tulip_debug > 5)
+ printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
+ dev->name, entry, status);
+ if (--rx_work_limit < 0)
+ break;
+ /* Error / multi-buffer frames: account them, don't deliver. */
+ if ((status & 0x38008300) != 0x0300) {
+ if ((status & 0x38000300) != 0x0300) {
+ /* Ignore earlier buffers. */
+ if ((status & 0xffff) != 0x7fff) {
+ if (tulip_debug > 1)
+ printk(KERN_WARNING "%s: Oversized Ethernet frame "
+ "spanned multiple buffers, status %8.8x!\n",
+ dev->name, status);
+ tp->stats.rx_length_errors++;
+ }
+ } else if (status & RxDescFatalErr) {
+ /* There was a fatal error. */
+ if (tulip_debug > 2)
+ printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
+ dev->name, status);
+ tp->stats.rx_errors++; /* end of a packet.*/
+ if (status & 0x0890) tp->stats.rx_length_errors++;
+ if (status & 0x0004) tp->stats.rx_frame_errors++;
+ if (status & 0x0002) tp->stats.rx_crc_errors++;
+ if (status & 0x0001) tp->stats.rx_fifo_errors++;
+ }
+ } else {
+ /* Omit the four octet CRC from the length. */
+ short pkt_len = ((status >> 16) & 0x7ff) - 4;
+ struct sk_buff *skb;
+
+#ifndef final_version
+ if (pkt_len > 1518) {
+ printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
+ dev->name, pkt_len, pkt_len);
+ pkt_len = 1518;
+ tp->stats.rx_length_errors++;
+ }
+#endif
+
+#ifdef CONFIG_NET_HW_FLOWCONTROL
+ drop = atomic_read(&netdev_dropping);
+ if (drop)
+ goto throttle;
+#endif
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < tulip_rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ pci_dma_sync_single(tp->pdev,
+ tp->rx_buffers[entry].mapping,
+ pkt_len, PCI_DMA_FROMDEVICE);
+#if ! defined(__alpha__)
+ eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
+ pkt_len, 0);
+ skb_put(skb, pkt_len);
+#else
+ memcpy(skb_put(skb, pkt_len),
+ tp->rx_buffers[entry].skb->tail,
+ pkt_len);
+#endif
+ } else { /* Pass up the skb already on the Rx ring. */
+ char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
+ pkt_len);
+
+#ifndef final_version
+ if (tp->rx_buffers[entry].mapping !=
+ le32_to_cpu(tp->rx_ring[entry].buffer1)) {
+ printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
+ "do not match in tulip_rx: %08x vs. %08x %p / %p.\n",
+ dev->name,
+ le32_to_cpu(tp->rx_ring[entry].buffer1),
+ tp->rx_buffers[entry].mapping,
+ skb->head, temp);
+ }
+#endif
+
+ pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
+ PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+
+ /* Ring slot surrendered to the stack; tulip_refill_rx()
+ * will allocate a replacement skb later. */
+ tp->rx_buffers[entry].skb = NULL;
+ tp->rx_buffers[entry].mapping = 0;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+#ifdef CONFIG_NET_HW_FLOWCONTROL
+ mit_sel =
+#endif
+ netif_rx(skb);
+
+#ifdef CONFIG_NET_HW_FLOWCONTROL
+ /* Feed the stack's congestion verdict back into our work limit. */
+ switch (mit_sel) {
+ case NET_RX_SUCCESS:
+ case NET_RX_CN_LOW:
+ case NET_RX_CN_MOD:
+ break;
+
+ case NET_RX_CN_HIGH:
+ rx_work_limit -= NET_RX_CN_HIGH; /* additional*/
+ break;
+ case NET_RX_DROP:
+ rx_work_limit = -1;
+ break;
+ default:
+ printk("unknown feedback return code %d\n", mit_sel);
+ break;
+ }
+
+ drop = atomic_read(&netdev_dropping);
+ if (drop) {
+throttle:
+ rx_work_limit = -1;
+ mit_sel = NET_RX_DROP;
+
+ if (tp->fc_bit) {
+ long ioaddr = dev->base_addr;
+
+ /* disable Rx & RxNoBuf ints. */
+ outl(tulip_tbl[tp->chip_id].valid_intrs&RX_A_NBF_STOP, ioaddr + CSR7);
+ set_bit(tp->fc_bit, &netdev_fc_xoff);
+ }
+ }
+#endif
+ dev->last_rx = jiffies;
+ tp->stats.rx_packets++;
+ tp->stats.rx_bytes += pkt_len;
+ }
+ received++;
+ entry = (++tp->cur_rx) % RX_RING_SIZE;
+ }
+#ifdef CONFIG_NET_HW_FLOWCONTROL
+
+ /* We use this simplistic scheme for IM. It's proven by
+ real life installations. We can have IM enabled
+ continuously but this would cause unnecessary latency.
+ Unfortunately we can't use all the NET_RX_* feedback here.
+ This would turn on IM for devices that are not contributing
+ to backlog congestion with unnecessary latency.
+
+ We monitor the device RX-ring and have:
+
+ HW Interrupt Mitigation either ON or OFF.
+
+ ON: More than 1 pkt received (per intr.) OR we are dropping
+ OFF: Only 1 pkt received
+
+ Note. We only use min and max (0, 15) settings from mit_table */
+
+
+ if( tp->flags & HAS_INTR_MITIGATION) {
+ if((received > 1 || mit_sel == NET_RX_DROP)
+ && tp->mit_sel != 15 ) {
+ tp->mit_sel = 15;
+ tp->mit_change = 1; /* Force IM change */
+ }
+ if((received <= 1 && mit_sel != NET_RX_DROP) && tp->mit_sel != 0 ) {
+ tp->mit_sel = 0;
+ tp->mit_change = 1; /* Force IM change */
+ }
+ }
+
+ return RX_RING_SIZE+1; /* maxrx+1 */
+#else
+ return received;
+#endif
+}
+
+/* Service a PHY-generated interrupt.  Only implemented for HP PA-RISC
+ * (__hppa__) boards, where link-change events arrive via a CSR12 GPIO
+ * line: acknowledge the interrupt in CSR12, re-evaluate duplex under
+ * tp->lock, then clear the ack bit.  A no-op on all other platforms. */
+static inline void phy_interrupt (struct net_device *dev)
+{
+#ifdef __hppa__
+ int csr12 = inl(dev->base_addr + CSR12) & 0xff;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+
+ /* Shadow copy lets us detect a real state change vs. a spurious IRQ. */
+ if (csr12 != tp->csr12_shadow) {
+ /* ack interrupt */
+ outl(csr12 | 0x02, dev->base_addr + CSR12);
+ tp->csr12_shadow = csr12;
+ /* do link change stuff */
+ spin_lock(&tp->lock);
+ tulip_check_duplex(dev);
+ spin_unlock(&tp->lock);
+ /* clear irq ack bit */
+ outl(csr12 & ~0x02, dev->base_addr + CSR12);
+ }
+#endif
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread.
+
+ Main ISR: reads CSR5, bails out if the interrupt is not ours, then
+ loops acknowledging and servicing Rx completions (tulip_rx), Tx
+ completions (skb unmap/free under tp->lock), and abnormal-error
+ sources, until CSR5 goes quiet or the per-invocation work budget
+ (tulip_max_interrupt_work) is exhausted.  On overload it masks
+ interrupts and arms either the hardware mitigation timer (CSR11)
+ or a software timer for chips without one (LC82C168). */
+void tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int csr5;
+ int entry;
+ int missed;
+ int rx = 0;
+ int tx = 0;
+ int oi = 0; /* count of "other interrupt" (error/timer) events serviced */
+ int maxrx = RX_RING_SIZE;
+ int maxtx = TX_RING_SIZE;
+ int maxoi = TX_RING_SIZE;
+ unsigned int work_count = tulip_max_interrupt_work;
+
+ /* Let's see whether the interrupt really is for us */
+ csr5 = inl(ioaddr + CSR5);
+
+ if (tp->flags & HAS_PHY_IRQ)
+ phy_interrupt (dev);
+
+ if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
+ return;
+
+ /* Per-device interrupt counter, reported in the diagnostics below. */
+ tp->nir++;
+
+ do {
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ outl(csr5 & 0x0001ffff, ioaddr + CSR5);
+
+ if (tulip_debug > 4)
+ printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
+ dev->name, csr5, inl(dev->base_addr + CSR5));
+
+ if (csr5 & (RxIntr | RxNoBuf)) {
+#ifdef CONFIG_NET_HW_FLOWCONTROL
+ if ((!tp->fc_bit) ||
+ (!test_bit(tp->fc_bit, &netdev_fc_xoff)))
+#endif
+ rx += tulip_rx(dev);
+ tulip_refill_rx(dev);
+ }
+
+ if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
+ unsigned int dirty_tx;
+
+ spin_lock(&tp->lock);
+
+ /* Reap completed Tx descriptors between dirty_tx and cur_tx. */
+ for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
+ dirty_tx++) {
+ /* NOTE(review): this inner 'entry' shadows the
+ * function-scope 'entry' declared above (which is only
+ * used after the loop, so behavior is unaffected). */
+ int entry = dirty_tx % TX_RING_SIZE;
+ int status = le32_to_cpu(tp->tx_ring[entry].status);
+
+ if (status < 0)
+ break; /* It still has not been Txed */
+
+ /* Check for Rx filter setup frames. */
+ if (tp->tx_buffers[entry].skb == NULL) {
+ /* test because dummy frames not mapped */
+ if (tp->tx_buffers[entry].mapping)
+ pci_unmap_single(tp->pdev,
+ tp->tx_buffers[entry].mapping,
+ sizeof(tp->setup_frame),
+ PCI_DMA_TODEVICE);
+ continue;
+ }
+
+ if (status & 0x8000) {
+ /* There was an major error, log it. */
+#ifndef final_version
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
+ dev->name, status);
+#endif
+ tp->stats.tx_errors++;
+ if (status & 0x4104) tp->stats.tx_aborted_errors++;
+ if (status & 0x0C00) tp->stats.tx_carrier_errors++;
+ if (status & 0x0200) tp->stats.tx_window_errors++;
+ if (status & 0x0002) tp->stats.tx_fifo_errors++;
+ if ((status & 0x0080) && tp->full_duplex == 0)
+ tp->stats.tx_heartbeat_errors++;
+ } else {
+ tp->stats.tx_bytes +=
+ tp->tx_buffers[entry].skb->len;
+ tp->stats.collisions += (status >> 3) & 15;
+ tp->stats.tx_packets++;
+ }
+
+ pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
+ tp->tx_buffers[entry].skb->len,
+ PCI_DMA_TODEVICE);
+
+ /* Free the original skb. */
+ dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
+ tp->tx_buffers[entry].skb = NULL;
+ tp->tx_buffers[entry].mapping = 0;
+ tx++;
+ }
+
+#ifndef final_version
+ if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
+ printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
+ dev->name, dirty_tx, tp->cur_tx);
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+
+ /* Ring has free slots again: let the stack queue more frames. */
+ if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
+ netif_wake_queue(dev);
+
+ tp->dirty_tx = dirty_tx;
+ if (csr5 & TxDied) {
+ if (tulip_debug > 2)
+ printk(KERN_WARNING "%s: The transmitter stopped."
+ " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
+ dev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
+ tulip_restart_rxtx(tp);
+ }
+ spin_unlock(&tp->lock);
+ }
+
+ /* Log errors. */
+ if (csr5 & AbnormalIntr) { /* Abnormal error summary bit. */
+ /* 0xffffffff usually means the card is gone (hot-unplug /
+ * dead bus); stop poking it. */
+ if (csr5 == 0xffffffff)
+ break;
+ if (csr5 & TxJabber) tp->stats.tx_errors++;
+ if (csr5 & TxFIFOUnderflow) {
+ if ((tp->csr6 & 0xC000) != 0xC000)
+ tp->csr6 += 0x4000; /* Bump up the Tx threshold */
+ else
+ tp->csr6 |= 0x00200000; /* Store-n-forward. */
+ /* Restart the transmit process. */
+ tulip_restart_rxtx(tp);
+ outl(0, ioaddr + CSR1);
+ }
+ if (csr5 & (RxDied | RxNoBuf)) {
+ if (tp->flags & COMET_MAC_ADDR) {
+ /* Comet chips: rewrite the multicast filter registers
+ * after an Rx stop. */
+ outl(tp->mc_filter[0], ioaddr + 0xAC);
+ outl(tp->mc_filter[1], ioaddr + 0xB0);
+ }
+ }
+ if (csr5 & RxDied) { /* Missed a Rx frame. */
+ tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
+#ifdef CONFIG_NET_HW_FLOWCONTROL
+ if (tp->fc_bit && !test_bit(tp->fc_bit, &netdev_fc_xoff)) {
+ tp->stats.rx_errors++;
+ tulip_start_rxtx(tp);
+ }
+#else
+ tp->stats.rx_errors++;
+ tulip_start_rxtx(tp);
+#endif
+ }
+ /*
+ * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
+ * call is ever done under the spinlock
+ */
+ if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
+ if (tp->link_change)
+ (tp->link_change)(dev, csr5);
+ }
+ if (csr5 & SytemError) {
+ int error = (csr5 >> 23) & 7;
+ /* oops, we hit a PCI error. The code produced corresponds
+ * to the reason:
+ * 0 - parity error
+ * 1 - master abort
+ * 2 - target abort
+ * Note that on parity error, we should do a software reset
+ * of the chip to get it back into a sane state (according
+ * to the 21142/3 docs that is).
+ * -- rmk
+ */
+ printk(KERN_ERR "%s: (%lu) System Error occured (%d)\n",
+ dev->name, tp->nir, error);
+ }
+ /* Clear all error sources, included undocumented ones! */
+ outl(0x0800f7ba, ioaddr + CSR5);
+ oi++;
+ }
+ if (csr5 & TimerInt) {
+
+ if (tulip_debug > 2)
+ printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
+ dev->name, csr5);
+#ifdef CONFIG_NET_HW_FLOWCONTROL
+ if (tp->fc_bit && (test_bit(tp->fc_bit, &netdev_fc_xoff)))
+ if (net_ratelimit()) printk("BUG!! enabling interupt when FC off (timerintr.) \n");
+#endif
+ outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
+ tp->ttimer = 0;
+ oi++;
+ }
+ /* Work budget exceeded: mask interrupts and defer further
+ * servicing to a (hardware or software) timer. */
+ if (tx > maxtx || rx > maxrx || oi > maxoi) {
+ if (tulip_debug > 1)
+ printk(KERN_WARNING "%s: Too much work during an interrupt, "
+ "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n", dev->name, csr5, tp->nir, tx, rx, oi);
+
+ /* Acknowledge all interrupt sources. */
+ outl(0x8001ffff, ioaddr + CSR5);
+ if (tp->flags & HAS_INTR_MITIGATION) {
+#ifdef CONFIG_NET_HW_FLOWCONTROL
+ if(tp->mit_change) {
+ outl(mit_table[tp->mit_sel], ioaddr + CSR11);
+ tp->mit_change = 0;
+ }
+#else
+ /* Josip Loncaric at ICASE did extensive experimentation
+ to develop a good interrupt mitigation setting.*/
+ outl(0x8b240000, ioaddr + CSR11);
+#endif
+ } else if (tp->chip_id == LC82C168) {
+ /* the LC82C168 doesn't have a hw timer.*/
+ outl(0x00, ioaddr + CSR7);
+ mod_timer(&tp->timer, RUN_AT(HZ/50));
+ } else {
+ /* Mask all interrupting sources, set timer to
+ re-enable. */
+#ifndef CONFIG_NET_HW_FLOWCONTROL
+ outl(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
+ outl(0x0012, ioaddr + CSR11);
+#endif
+ }
+ break;
+ }
+
+ work_count--;
+ if (work_count == 0)
+ break;
+
+ csr5 = inl(ioaddr + CSR5);
+ } while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);
+
+ tulip_refill_rx(dev);
+
+ /* check if the card is in suspend mode */
+ entry = tp->dirty_rx % RX_RING_SIZE;
+ if (tp->rx_buffers[entry].skb == NULL) {
+ if (tulip_debug > 1)
+ printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n", dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
+ if (tp->chip_id == LC82C168) {
+ outl(0x00, ioaddr + CSR7);
+ mod_timer(&tp->timer, RUN_AT(HZ/50));
+ } else {
+ if (tp->ttimer == 0 || (inl(ioaddr + CSR11) & 0xffff) == 0) {
+ if (tulip_debug > 1)
+ printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n", dev->name, tp->nir);
+ outl(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
+ ioaddr + CSR7);
+ outl(TimerInt, ioaddr + CSR5);
+ outl(12, ioaddr + CSR11);
+ tp->ttimer = 1;
+ }
+ }
+ }
+
+ /* Fold the hardware missed-frame counter (CSR8) into rx_dropped. */
+ if ((missed = inl(ioaddr + CSR8) & 0x1ffff)) {
+ tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
+ }
+
+ if (tulip_debug > 4)
+ printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
+ dev->name, inl(ioaddr + CSR5));
+
+}
--- /dev/null
+/*
+ drivers/net/tulip/media.c
+
+ Maintained by Jeff Garzik <jgarzik@pobox.com>
+ Copyright 2000,2001 The Linux Kernel Team
+ Written/copyright 1994-2001 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Please refer to Documentation/DocBook/tulip.{pdf,ps,html}
+ for more information on this driver, or visit the project
+ Web page at http://sourceforge.net/projects/tulip/
+
+*/
+
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include "tulip.h"
+
+
+/* This is a mysterious value that can be written to CSR11 in the 21040 (only)
+ to support a pre-NWay full-duplex signaling mechanism using short frames.
+ No one knows what it should be, but if left at its default value some
+ 10base2(!) packets trigger a full-duplex-request interrupt. */
+#define FULL_DUPLEX_MAGIC 0x6969
+
+/* The maximum data clock rate is 2.5 Mhz. The minimum timing is usually
+ met by back-to-back PCI I/O cycles, but we insert a delay to avoid
+ "overclocking" issues or future 66Mhz PCI. */
+#define mdio_delay() inl(mdio_addr)
+
+/* Read and write the MII registers using software-generated serial
+ MDIO protocol. It is just different enough from the EEPROM protocol
+ to not share code. The maxium data clock rate is 2.5 Mhz. */
+#define MDIO_SHIFT_CLK 0x10000
+#define MDIO_DATA_WRITE0 0x00000
+#define MDIO_DATA_WRITE1 0x20000
+#define MDIO_ENB 0x00000 /* Ignore the 0x02000 databook setting. */
+#define MDIO_ENB_IN 0x40000
+#define MDIO_DATA_READ 0x80000
+
+/* Comet (ADMtek) chips expose MII registers directly as CSRs instead of
+ via bit-banged MDIO.  This table maps MII register number (0-31) to the
+ Comet I/O offset; 0 means the register has no direct mapping, and reads
+ of unmapped registers return 0xffff (see tulip_mdio_read). */
+static const unsigned char comet_miireg2offset[32] = {
+ 0xB4, 0xB8, 0xBC, 0xC0, 0xC4, 0xC8, 0xCC, 0, 0,0,0,0, 0,0,0,0,
+ 0,0xD0,0,0, 0,0,0,0, 0,0,0,0, 0, 0xD4, 0xD8, 0xDC, };
+
+
+/* MII transceiver control section.
+ Read and write the MII registers using software-generated serial
+ MDIO protocol. See the MII specifications or DP83840A data sheet
+ for details. */
+
+/* Read one 16-bit MII register from PHY 'phy_id', register 'location'.
+ * Three hardware paths: Comet chips map some MII registers straight onto
+ * I/O-space CSRs; the LC82C168 (PNIC) has a hardware MDIO engine at
+ * offset 0xA0 that is polled for completion; everything else bit-bangs
+ * the IEEE 802.3 clause-22 MDIO frame through CSR9.  Returns 0xffff for
+ * out-of-range locations or unmapped Comet registers.  Serialized by
+ * tp->mii_lock (IRQ-safe). */
+int tulip_mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ int i;
+ /* Clause-22 read frame: start + read-op + PHY addr + reg addr. */
+ int read_cmd = (0xf6 << 10) | ((phy_id & 0x1f) << 5) | location;
+ int retval = 0;
+ long ioaddr = dev->base_addr;
+ long mdio_addr = ioaddr + CSR9;
+ unsigned long flags;
+
+ if (location & ~0x1f)
+ return 0xffff;
+
+ if (tp->chip_id == COMET && phy_id == 30) {
+ if (comet_miireg2offset[location])
+ return inl(ioaddr + comet_miireg2offset[location]);
+ return 0xffff;
+ }
+
+ spin_lock_irqsave(&tp->mii_lock, flags);
+ if (tp->chip_id == LC82C168) {
+ /* PNIC hardware MDIO engine: issue command, poll busy bit 31. */
+ int i = 1000;
+ outl(0x60020000 + (phy_id<<23) + (location<<18), ioaddr + 0xA0);
+ inl(ioaddr + 0xA0);
+ inl(ioaddr + 0xA0);
+ while (--i > 0) {
+ barrier();
+ if ( ! ((retval = inl(ioaddr + 0xA0)) & 0x80000000))
+ break;
+ }
+ spin_unlock_irqrestore(&tp->mii_lock, flags);
+ return retval & 0xffff;
+ }
+
+ /* Establish sync by sending at least 32 logic ones. */
+ for (i = 32; i >= 0; i--) {
+ outl(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
+ mdio_delay();
+ outl(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ /* Shift the read command bits out. */
+ for (i = 15; i >= 0; i--) {
+ int dataval = (read_cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
+
+ outl(MDIO_ENB | dataval, mdio_addr);
+ mdio_delay();
+ outl(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ /* Read the two transition, 16 data, and wire-idle bits. */
+ for (i = 19; i > 0; i--) {
+ outl(MDIO_ENB_IN, mdio_addr);
+ mdio_delay();
+ retval = (retval << 1) | ((inl(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
+ outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+
+ spin_unlock_irqrestore(&tp->mii_lock, flags);
+ /* Drop the trailing idle bit, keep the 16 data bits. */
+ return (retval>>1) & 0xffff;
+}
+
+/* Write 'val' to MII register 'location' of PHY 'phy_id'.  Mirrors
+ * tulip_mdio_read(): direct CSR write on Comet, hardware MDIO engine on
+ * LC82C168 (polled for completion), bit-banged clause-22 write frame via
+ * CSR9 otherwise.  Silently ignores out-of-range locations.  Serialized
+ * by tp->mii_lock (IRQ-safe). */
+void tulip_mdio_write(struct net_device *dev, int phy_id, int location, int val)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ int i;
+ /* Clause-22 write frame: start + write-op + addresses + data. */
+ int cmd = (0x5002 << 16) | ((phy_id & 0x1f) << 23) | (location<<18) | (val & 0xffff);
+ long ioaddr = dev->base_addr;
+ long mdio_addr = ioaddr + CSR9;
+ unsigned long flags;
+
+ if (location & ~0x1f)
+ return;
+
+ if (tp->chip_id == COMET && phy_id == 30) {
+ if (comet_miireg2offset[location])
+ outl(val, ioaddr + comet_miireg2offset[location]);
+ return;
+ }
+
+ spin_lock_irqsave(&tp->mii_lock, flags);
+ if (tp->chip_id == LC82C168) {
+ /* PNIC hardware MDIO engine: issue command, poll busy bit 31. */
+ int i = 1000;
+ outl(cmd, ioaddr + 0xA0);
+ do {
+ barrier();
+ if ( ! (inl(ioaddr + 0xA0) & 0x80000000))
+ break;
+ } while (--i > 0);
+ spin_unlock_irqrestore(&tp->mii_lock, flags);
+ return;
+ }
+
+ /* Establish sync by sending 32 logic ones. */
+ for (i = 32; i >= 0; i--) {
+ outl(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
+ mdio_delay();
+ outl(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ /* Shift the command bits out. */
+ for (i = 31; i >= 0; i--) {
+ int dataval = (cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
+ outl(MDIO_ENB | dataval, mdio_addr);
+ mdio_delay();
+ outl(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ /* Clear out extra bits. */
+ for (i = 2; i > 0; i--) {
+ outl(MDIO_ENB_IN, mdio_addr);
+ mdio_delay();
+ outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+
+ spin_unlock_irqrestore(&tp->mii_lock, flags);
+}
+
+
+/* Set up the transceiver control registers for the selected media type.
+ *
+ * Walks one of several chip-specific paths:
+ *  - with an SROM media table (tp->mtable): interpret the current media
+ *    leaf by type (0 = 21140 GPIO xcvr, 2/4 = 21142/3 SIA, 1/3 = MII
+ *    init/reset sequences, 5/6 = reset-only) and program CSR12-CSR15;
+ *  - otherwise fall back to hard-coded setups for DC21041, LC82C168
+ *    (PNIC), DC21040, and finally a table-less guess.
+ * 'startup' != 0 additionally performs the one-time reset/initial
+ * programming steps.  The computed mode bits are merged into tp->csr6;
+ * the caller is responsible for writing CSR6 to the chip. */
+void tulip_select_media(struct net_device *dev, int startup)
+{
+ long ioaddr = dev->base_addr;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ struct mediatable *mtable = tp->mtable;
+ u32 new_csr6;
+ int i;
+
+ if (mtable) {
+ struct medialeaf *mleaf = &mtable->mleaf[tp->cur_index];
+ unsigned char *p = mleaf->leafdata;
+ switch (mleaf->type) {
+ case 0: /* 21140 non-MII xcvr. */
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: Using a 21140 non-MII transceiver"
+ " with control setting %2.2x.\n",
+ dev->name, p[1]);
+ dev->if_port = p[0];
+ if (startup)
+ outl(mtable->csr12dir | 0x100, ioaddr + CSR12);
+ outl(p[1], ioaddr + CSR12);
+ new_csr6 = 0x02000000 | ((p[2] & 0x71) << 18);
+ break;
+ case 2: case 4: {
+ /* 21142/21143 SIA media: five 16-bit setup words follow the
+ * media byte in the leaf. */
+ u16 setup[5];
+ u32 csr13val, csr14val, csr15dir, csr15val;
+ for (i = 0; i < 5; i++)
+ setup[i] = get_u16(&p[i*2 + 1]);
+
+ dev->if_port = p[0] & MEDIA_MASK;
+ if (tulip_media_cap[dev->if_port] & MediaAlwaysFD)
+ tp->full_duplex = 1;
+
+ if (startup && mtable->has_reset) {
+ struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset];
+ unsigned char *rst = rleaf->leafdata;
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: Resetting the transceiver.\n",
+ dev->name);
+ for (i = 0; i < rst[0]; i++)
+ outl(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15);
+ }
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: 21143 non-MII %s transceiver control "
+ "%4.4x/%4.4x.\n",
+ dev->name, medianame[dev->if_port], setup[0], setup[1]);
+ if (p[0] & 0x40) { /* SIA (CSR13-15) setup values are provided. */
+ csr13val = setup[0];
+ csr14val = setup[1];
+ csr15dir = (setup[3]<<16) | setup[2];
+ csr15val = (setup[4]<<16) | setup[2];
+ /* CSR13 must be cleared before reprogramming the SIA. */
+ outl(0, ioaddr + CSR13);
+ outl(csr14val, ioaddr + CSR14);
+ outl(csr15dir, ioaddr + CSR15); /* Direction */
+ outl(csr15val, ioaddr + CSR15); /* Data */
+ outl(csr13val, ioaddr + CSR13);
+ } else {
+ csr13val = 1;
+ csr14val = 0;
+ csr15dir = (setup[0]<<16) | 0x0008;
+ csr15val = (setup[1]<<16) | 0x0008;
+ if (dev->if_port <= 4)
+ csr14val = t21142_csr14[dev->if_port];
+ if (startup) {
+ outl(0, ioaddr + CSR13);
+ outl(csr14val, ioaddr + CSR14);
+ }
+ outl(csr15dir, ioaddr + CSR15); /* Direction */
+ outl(csr15val, ioaddr + CSR15); /* Data */
+ if (startup) outl(csr13val, ioaddr + CSR13);
+ }
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: Setting CSR15 to %8.8x/%8.8x.\n",
+ dev->name, csr15dir, csr15val);
+ if (mleaf->type == 4)
+ new_csr6 = 0x82020000 | ((setup[2] & 0x71) << 18);
+ else
+ new_csr6 = 0x82420000;
+ break;
+ }
+ case 1: case 3: {
+ /* MII media leaf: replay the SROM init (and, on startup,
+ * reset) sequences, then set the advertising register. */
+ int phy_num = p[0];
+ int init_length = p[1];
+ u16 *misc_info, tmp_info;
+
+ dev->if_port = 11;
+ new_csr6 = 0x020E0000;
+ if (mleaf->type == 3) { /* 21142 */
+ u16 *init_sequence = (u16*)(p+2);
+ u16 *reset_sequence = &((u16*)(p+3))[init_length];
+ int reset_length = p[2 + init_length*2];
+ misc_info = reset_sequence + reset_length;
+ if (startup)
+ for (i = 0; i < reset_length; i++)
+ outl(get_u16(&reset_sequence[i]) << 16, ioaddr + CSR15);
+ for (i = 0; i < init_length; i++)
+ outl(get_u16(&init_sequence[i]) << 16, ioaddr + CSR15);
+ } else {
+ u8 *init_sequence = p + 2;
+ u8 *reset_sequence = p + 3 + init_length;
+ int reset_length = p[2 + init_length];
+ misc_info = (u16*)(reset_sequence + reset_length);
+ if (startup) {
+ outl(mtable->csr12dir | 0x100, ioaddr + CSR12);
+ for (i = 0; i < reset_length; i++)
+ outl(reset_sequence[i], ioaddr + CSR12);
+ }
+ for (i = 0; i < init_length; i++)
+ outl(init_sequence[i], ioaddr + CSR12);
+ }
+ tmp_info = get_u16(&misc_info[1]);
+ if (tmp_info)
+ tp->advertising[phy_num] = tmp_info | 1;
+ if (tmp_info && startup < 2) {
+ if (tp->mii_advertise == 0)
+ tp->mii_advertise = tp->advertising[phy_num];
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: Advertising %4.4x on MII %d.\n",
+ dev->name, tp->mii_advertise, tp->phys[phy_num]);
+ tulip_mdio_write(dev, tp->phys[phy_num], 4, tp->mii_advertise);
+ }
+ break;
+ }
+ case 5: case 6: {
+ u16 setup[5];
+
+ new_csr6 = 0; /* FIXME */
+
+ for (i = 0; i < 5; i++)
+ setup[i] = get_u16(&p[i*2 + 1]);
+
+ if (startup && mtable->has_reset) {
+ struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset];
+ unsigned char *rst = rleaf->leafdata;
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: Resetting the transceiver.\n",
+ dev->name);
+ for (i = 0; i < rst[0]; i++)
+ outl(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15);
+ }
+
+ break;
+ }
+ default:
+ printk(KERN_DEBUG "%s: Invalid media table selection %d.\n",
+ dev->name, mleaf->type);
+ new_csr6 = 0x020E0000;
+ }
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: Using media type %s, CSR12 is %2.2x.\n",
+ dev->name, medianame[dev->if_port],
+ inl(ioaddr + CSR12) & 0xff);
+ } else if (tp->chip_id == DC21041) {
+ int port = dev->if_port <= 4 ? dev->if_port : 0;
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: 21041 using media %s, CSR12 is %4.4x.\n",
+ dev->name, medianame[port == 3 ? 12: port],
+ inl(ioaddr + CSR12));
+ outl(0x00000000, ioaddr + CSR13); /* Reset the serial interface */
+ outl(t21041_csr14[port], ioaddr + CSR14);
+ outl(t21041_csr15[port], ioaddr + CSR15);
+ outl(t21041_csr13[port], ioaddr + CSR13);
+ new_csr6 = 0x80020000;
+ } else if (tp->chip_id == LC82C168) {
+ if (startup && ! tp->medialock)
+ dev->if_port = tp->mii_cnt ? 11 : 0;
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: PNIC PHY status is %3.3x, media %s.\n",
+ dev->name, inl(ioaddr + 0xB8), medianame[dev->if_port]);
+ if (tp->mii_cnt) {
+ new_csr6 = 0x810C0000;
+ outl(0x0001, ioaddr + CSR15);
+ outl(0x0201B07A, ioaddr + 0xB8);
+ } else if (startup) {
+ /* Start with 10mbps to do autonegotiation. */
+ outl(0x32, ioaddr + CSR12);
+ new_csr6 = 0x00420000;
+ outl(0x0001B078, ioaddr + 0xB8);
+ outl(0x0201B078, ioaddr + 0xB8);
+ } else if (dev->if_port == 3 || dev->if_port == 5) {
+ outl(0x33, ioaddr + CSR12);
+ new_csr6 = 0x01860000;
+ /* Trigger autonegotiation. */
+ outl(startup ? 0x0201F868 : 0x0001F868, ioaddr + 0xB8);
+ } else {
+ outl(0x32, ioaddr + CSR12);
+ new_csr6 = 0x00420000;
+ outl(0x1F078, ioaddr + 0xB8);
+ }
+ } else if (tp->chip_id == DC21040) { /* 21040 */
+ /* Turn on the xcvr interface. */
+ int csr12 = inl(ioaddr + CSR12);
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: 21040 media type is %s, CSR12 is %2.2x.\n",
+ dev->name, medianame[dev->if_port], csr12);
+ if (tulip_media_cap[dev->if_port] & MediaAlwaysFD)
+ tp->full_duplex = 1;
+ new_csr6 = 0x20000;
+ /* Set the full duplex match frame. */
+ outl(FULL_DUPLEX_MAGIC, ioaddr + CSR11);
+ outl(0x00000000, ioaddr + CSR13); /* Reset the serial interface */
+ if (t21040_csr13[dev->if_port] & 8) {
+ outl(0x0705, ioaddr + CSR14);
+ outl(0x0006, ioaddr + CSR15);
+ } else {
+ outl(0xffff, ioaddr + CSR14);
+ outl(0x0000, ioaddr + CSR15);
+ }
+ outl(0x8f01 | t21040_csr13[dev->if_port], ioaddr + CSR13);
+ } else { /* Unknown chip type with no media table. */
+ if (tp->default_port == 0)
+ dev->if_port = tp->mii_cnt ? 11 : 3;
+ if (tulip_media_cap[dev->if_port] & MediaIsMII) {
+ new_csr6 = 0x020E0000;
+ } else if (tulip_media_cap[dev->if_port] & MediaIsFx) {
+ new_csr6 = 0x02860000;
+ } else
+ new_csr6 = 0x03860000;
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: No media description table, assuming "
+ "%s transceiver, CSR12 %2.2x.\n",
+ dev->name, medianame[dev->if_port],
+ inl(ioaddr + CSR12));
+ }
+
+ /* Merge the new mode bits into CSR6, preserving bit 9 handling and
+ * setting full-duplex (0x0200) as decided above. */
+ tp->csr6 = new_csr6 | (tp->csr6 & 0xfdff) | (tp->full_duplex ? 0x0200 : 0);
+ return;
+}
+
+/*
+ Check the MII negotiated duplex and change the CSR6 setting if
+ required.
+ Return 0 if everything is OK (no CSR6 change needed).
+ Return 1 if CSR6 was updated and Rx/Tx restarted.
+ Return < 0 if the transceiver is missing (-2) or has no link beat (-1).
+ Caller must hold tp->lock where serialization is required.
+ */
+int tulip_check_duplex(struct net_device *dev)
+{
+ struct tulip_private *tp = dev->priv;
+ unsigned int bmsr, lpa, negotiated, new_csr6;
+
+ bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR);
+ lpa = tulip_mdio_read(dev, tp->phys[0], MII_LPA);
+ if (tulip_debug > 1)
+ printk(KERN_INFO "%s: MII status %4.4x, Link partner report "
+ "%4.4x.\n", dev->name, bmsr, lpa);
+ /* All-ones means no transceiver responded. */
+ if (bmsr == 0xffff)
+ return -2;
+ if ((bmsr & BMSR_LSTATUS) == 0) {
+ /* Re-read BMSR: the link-status bit is latched by the PHY, so
+ * the first read can report a stale link failure -- presumably
+ * why the code retries once before giving up (TODO confirm). */
+ int new_bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR);
+ if ((new_bmsr & BMSR_LSTATUS) == 0) {
+ if (tulip_debug > 1)
+ printk(KERN_INFO "%s: No link beat on the MII interface,"
+ " status %4.4x.\n", dev->name, new_bmsr);
+ return -1;
+ }
+ }
+ negotiated = lpa & tp->advertising[0];
+ tp->full_duplex = mii_duplex(tp->full_duplex_lock, negotiated);
+
+ new_csr6 = tp->csr6;
+
+ /* 100 Mb/s partners run without the Tx threshold; 10 Mb/s needs it. */
+ if (negotiated & LPA_100) new_csr6 &= ~TxThreshold;
+ else new_csr6 |= TxThreshold;
+ if (tp->full_duplex) new_csr6 |= FullDuplex;
+ else new_csr6 &= ~FullDuplex;
+
+ if (new_csr6 != tp->csr6) {
+ tp->csr6 = new_csr6;
+ tulip_restart_rxtx(tp);
+
+ if (tulip_debug > 0)
+ printk(KERN_INFO "%s: Setting %s-duplex based on MII"
+ "#%d link partner capability of %4.4x.\n",
+ dev->name, tp->full_duplex ? "full" : "half",
+ tp->phys[0], lpa);
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Probe MDIO addresses for attached MII transceivers, record them in
+ * tp->phys[], program each PHY's advertising register, and either enable
+ * autonegotiation (no forced media) or force the configured speed/duplex.
+ * Fills tp->mii_cnt; if the SROM claims an MII but none answered, falls
+ * back to assuming a PHY at address 1.  'board_idx' is used only for
+ * log messages.  Called once at probe time (__devinit). */
+void __devinit tulip_find_mii (struct net_device *dev, int board_idx)
+{
+ struct tulip_private *tp = dev->priv;
+ int phyn, phy_idx = 0;
+ int mii_reg0;
+ int mii_advert;
+ unsigned int to_advert, new_bmcr, ane_switch;
+
+ /* Find the connected MII xcvrs.
+ Doing this in open() would allow detecting external xcvrs later,
+ but takes much time. */
+ /* Scan addresses 1..31 then 0 (phyn & 0x1f wraps 32 to 0). */
+ for (phyn = 1; phyn <= 32 && phy_idx < sizeof (tp->phys); phyn++) {
+ int phy = phyn & 0x1f;
+ int mii_status = tulip_mdio_read (dev, phy, MII_BMSR);
+ if ((mii_status & 0x8301) == 0x8001 ||
+ ((mii_status & BMSR_100BASE4) == 0
+ && (mii_status & 0x7800) != 0)) {
+ /* preserve Becker logic, gain indentation level */
+ } else {
+ continue;
+ }
+
+ mii_reg0 = tulip_mdio_read (dev, phy, MII_BMCR);
+ mii_advert = tulip_mdio_read (dev, phy, MII_ADVERTISE);
+ ane_switch = 0;
+
+ /* if not advertising at all, gen an
+ * advertising value from the capability
+ * bits in BMSR
+ */
+ if ((mii_advert & ADVERTISE_ALL) == 0) {
+ unsigned int tmpadv = tulip_mdio_read (dev, phy, MII_BMSR);
+ mii_advert = ((tmpadv >> 6) & 0x3e0) | 1;
+ }
+
+ /* Decide what to advertise: module parameter wins, then any
+ * previously recorded value, else the PHY's own register. */
+ if (tp->mii_advertise) {
+ tp->advertising[phy_idx] =
+ to_advert = tp->mii_advertise;
+ } else if (tp->advertising[phy_idx]) {
+ to_advert = tp->advertising[phy_idx];
+ } else {
+ tp->advertising[phy_idx] =
+ tp->mii_advertise =
+ to_advert = mii_advert;
+ }
+
+ tp->phys[phy_idx++] = phy;
+
+ printk (KERN_INFO "tulip%d: MII transceiver #%d "
+ "config %4.4x status %4.4x advertising %4.4x.\n",
+ board_idx, phy, mii_reg0, mii_status, mii_advert);
+
+ /* Fixup for DLink with miswired PHY. */
+ if (mii_advert != to_advert) {
+ printk (KERN_DEBUG "tulip%d: Advertising %4.4x on PHY %d,"
+ " previously advertising %4.4x.\n",
+ board_idx, to_advert, phy, mii_advert);
+ tulip_mdio_write (dev, phy, 4, to_advert);
+ }
+
+ /* Enable autonegotiation: some boards default to off. */
+ if (tp->default_port == 0) {
+ new_bmcr = mii_reg0 | BMCR_ANENABLE;
+ if (new_bmcr != mii_reg0) {
+ new_bmcr |= BMCR_ANRESTART;
+ ane_switch = 1;
+ }
+ }
+ /* ...or disable nway, if forcing media */
+ else {
+ new_bmcr = mii_reg0 & ~BMCR_ANENABLE;
+ if (new_bmcr != mii_reg0)
+ ane_switch = 1;
+ }
+
+ /* clear out bits we never want at this point */
+ new_bmcr &= ~(BMCR_CTST | BMCR_FULLDPLX | BMCR_ISOLATE |
+ BMCR_PDOWN | BMCR_SPEED100 | BMCR_LOOPBACK |
+ BMCR_RESET);
+
+ if (tp->full_duplex)
+ new_bmcr |= BMCR_FULLDPLX;
+ if (tulip_media_cap[tp->default_port] & MediaIs100)
+ new_bmcr |= BMCR_SPEED100;
+
+ if (new_bmcr != mii_reg0) {
+ /* some phys need the ANE switch to
+ * happen before forced media settings
+ * will "take." However, we write the
+ * same value twice in order not to
+ * confuse the sane phys.
+ */
+ if (ane_switch) {
+ tulip_mdio_write (dev, phy, MII_BMCR, new_bmcr);
+ udelay (10);
+ }
+ tulip_mdio_write (dev, phy, MII_BMCR, new_bmcr);
+ }
+ }
+ tp->mii_cnt = phy_idx;
+ if (tp->mtable && tp->mtable->has_mii && phy_idx == 0) {
+ printk (KERN_INFO "tulip%d: ***WARNING***: No MII transceiver found!\n",
+ board_idx);
+ /* Assume a PHY at the usual default address so later MDIO
+ * accesses have something to talk to. */
+ tp->phys[0] = 1;
+ }
+}
--- /dev/null
+/*
+ drivers/net/tulip/pnic.c
+
+ Maintained by Jeff Garzik <jgarzik@pobox.com>
+ Copyright 2000,2001 The Linux Kernel Team
+ Written/copyright 1994-2001 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Please refer to Documentation/DocBook/tulip.{pdf,ps,html}
+ for more information on this driver, or visit the project
+ Web page at http://sourceforge.net/projects/tulip/
+
+*/
+
+#include <linux/kernel.h>
+#include "tulip.h"
+
+
+/* Read the PNIC (LC82C168) internal autonegotiation result register at
+ * offset 0xB8 and, if negotiation completed, set dev->if_port, duplex,
+ * and CSR12/CSR6 to match the negotiated media; restarts Rx/Tx only if
+ * CSR6 actually changed.  baseT4 results are ignored. */
+void pnic_do_nway(struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ u32 phy_reg = inl(ioaddr + 0xB8);
+ /* Strip the mode bits we are about to recompute. */
+ u32 new_csr6 = tp->csr6 & ~0x40C40200;
+
+ if (phy_reg & 0x78000000) { /* Ignore baseT4 */
+ if (phy_reg & 0x20000000) dev->if_port = 5;
+ else if (phy_reg & 0x40000000) dev->if_port = 3;
+ else if (phy_reg & 0x10000000) dev->if_port = 4;
+ else if (phy_reg & 0x08000000) dev->if_port = 0;
+ tp->nwayset = 1;
+ /* Odd if_port values are the 100 Mb/s media in this driver. */
+ new_csr6 = (dev->if_port & 1) ? 0x01860000 : 0x00420000;
+ outl(0x32 | (dev->if_port & 1), ioaddr + CSR12);
+ if (dev->if_port & 1)
+ outl(0x1F868, ioaddr + 0xB8);
+ if (phy_reg & 0x30000000) {
+ tp->full_duplex = 1;
+ new_csr6 |= 0x00000200;
+ }
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: PNIC autonegotiated status %8.8x, %s.\n",
+ dev->name, phy_reg, medianame[dev->if_port]);
+ if (tp->csr6 != new_csr6) {
+ tp->csr6 = new_csr6;
+ /* Restart Tx */
+ tulip_restart_rxtx(tp);
+ dev->trans_start = jiffies;
+ }
+ }
+}
+
+/* Link-change callback for the PNIC (LC82C168), invoked from
+ * tulip_interrupt() via tp->link_change when CSR5 reports TPLnkPass or
+ * TPLnkFail.  On link failure (and no external MII) it restarts internal
+ * autonegotiation; on link pass it re-checks duplex (MII) or applies the
+ * nway result, then flips the CSR7 mask to watch for the opposite event. */
+void pnic_lnk_change(struct net_device *dev, int csr5)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int phy_reg = inl(ioaddr + 0xB8);
+
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: PNIC link changed state %8.8x, CSR5 %8.8x.\n",
+ dev->name, phy_reg, csr5);
+ if (inl(ioaddr + CSR5) & TPLnkFail) {
+ outl((inl(ioaddr + CSR7) & ~TPLnkFail) | TPLnkPass, ioaddr + CSR7);
+ /* If we use an external MII, then we mustn't use the
+ * internal negotiation.
+ */
+ if (tulip_media_cap[dev->if_port] & MediaIsMII)
+ return;
+ /* Rate-limit renegotiation to once per second. */
+ if (! tp->nwayset || jiffies - dev->trans_start > 1*HZ) {
+ tp->csr6 = 0x00420000 | (tp->csr6 & 0x0000fdff);
+ outl(tp->csr6, ioaddr + CSR6);
+ outl(0x30, ioaddr + CSR12);
+ outl(0x0201F078, ioaddr + 0xB8); /* Turn on autonegotiation. */
+ dev->trans_start = jiffies;
+ }
+ } else if (inl(ioaddr + CSR5) & TPLnkPass) {
+ if (tulip_media_cap[dev->if_port] & MediaIsMII) {
+ spin_lock(&tp->lock);
+ tulip_check_duplex(dev);
+ spin_unlock(&tp->lock);
+ } else {
+ pnic_do_nway(dev);
+ }
+ outl((inl(ioaddr + CSR7) & ~TPLnkPass) | TPLnkFail, ioaddr + CSR7);
+ }
+}
+
+/* Periodic media-selection timer for the Lite-On PNIC.  Monitors the
+ * vendor PHY register (0xB8) and CSR5/CSR12, switching between the
+ * 10baseT and 100baseTx data paths when the link beat is lost.  Also
+ * doubles as a software interrupt-mitigation wakeup: when CSR7 reads
+ * zero the interrupt handler has masked all interrupts after a work
+ * overflow, and we refill Rx and re-enable them here. */
+void pnic_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 60*HZ;
+
+ if(!inl(ioaddr + CSR7)) {
+ /* the timer was called due to a work overflow
+ * in the interrupt handler. Skip the connection
+ * checks, the nic is definitively speaking with
+ * his link partner.
+ */
+ goto too_good_connection;
+ }
+
+ if (tulip_media_cap[dev->if_port] & MediaIsMII) {
+ spin_lock_irq(&tp->lock);
+ /* Poll more often (3s) while duplex is still settling. */
+ if (tulip_check_duplex(dev) > 0)
+ next_tick = 3*HZ;
+ spin_unlock_irq(&tp->lock);
+ } else {
+ int csr12 = inl(ioaddr + CSR12);
+ int new_csr6 = tp->csr6 & ~0x40C40200;
+ int phy_reg = inl(ioaddr + 0xB8);
+ int csr5 = inl(ioaddr + CSR5);
+
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: PNIC timer PHY status %8.8x, %s "
+ "CSR5 %8.8x.\n",
+ dev->name, phy_reg, medianame[dev->if_port], csr5);
+ if (phy_reg & 0x04000000) { /* Remote link fault */
+ /* Restart autonegotiation and retry quickly. */
+ outl(0x0201F078, ioaddr + 0xB8);
+ next_tick = 1*HZ;
+ tp->nwayset = 0;
+ } else if (phy_reg & 0x78000000) { /* Ignore baseT4 */
+ pnic_do_nway(dev);
+ next_tick = 60*HZ;
+ } else if (csr5 & TPLnkFail) { /* 100baseTx link beat */
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: %s link beat failed, CSR12 %4.4x, "
+ "CSR5 %8.8x, PHY %3.3x.\n",
+ dev->name, medianame[dev->if_port], csr12,
+ inl(ioaddr + CSR5), inl(ioaddr + 0xB8));
+ next_tick = 3*HZ;
+ if (tp->medialock) {
+ /* Media is locked by the user: deliberately do nothing. */
+ } else if (tp->nwayset && (dev->if_port & 1)) {
+ next_tick = 1*HZ;
+ } else if (dev->if_port == 0) {
+ /* Fall from 10baseT to 100baseTx and reprogram the PHY. */
+ dev->if_port = 3;
+ outl(0x33, ioaddr + CSR12);
+ new_csr6 = 0x01860000;
+ outl(0x1F868, ioaddr + 0xB8);
+ } else {
+ /* Fall back to 10baseT. */
+ dev->if_port = 0;
+ outl(0x32, ioaddr + CSR12);
+ new_csr6 = 0x00420000;
+ outl(0x1F078, ioaddr + 0xB8);
+ }
+ if (tp->csr6 != new_csr6) {
+ tp->csr6 = new_csr6;
+ /* Restart Tx */
+ tulip_restart_rxtx(tp);
+ dev->trans_start = jiffies;
+ if (tulip_debug > 1)
+ printk(KERN_INFO "%s: Changing PNIC configuration to %s "
+ "%s-duplex, CSR6 %8.8x.\n",
+ dev->name, medianame[dev->if_port],
+ tp->full_duplex ? "full" : "half", new_csr6);
+ }
+ }
+ }
+too_good_connection:
+ mod_timer(&tp->timer, RUN_AT(next_tick));
+ if(!inl(ioaddr + CSR7)) {
+ /* Interrupts are still masked from a prior work overflow:
+ * refill Rx with the IRQ line disabled, then restore the
+ * chip's normal interrupt enable set. */
+ if (tulip_debug > 1)
+ printk(KERN_INFO "%s: sw timer wakeup.\n", dev->name);
+ disable_irq(dev->irq);
+ tulip_refill_rx(dev);
+ enable_irq(dev->irq);
+ outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
+ }
+}
--- /dev/null
+/*
+ drivers/net/tulip/pnic2.c
+
+ Maintained by Jeff Garzik <jgarzik@pobox.com>
+ Copyright 2000,2001 The Linux Kernel Team
+ Written/copyright 1994-2001 by Donald Becker.
+ Modified to help support PNIC_II by Kevin B. Hendricks
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Please refer to Documentation/DocBook/tulip.{pdf,ps,html}
+ for more information on this driver, or visit the project
+ Web page at http://sourceforge.net/projects/tulip/
+
+*/
+
+
+/* Understanding the PNIC_II - everything in this file is based
+ * on the PNIC_II_PDF datasheet which is sorely lacking in detail
+ *
+ * As I understand things, here are the registers and bits that
+ * explain the masks and constants used in this file that are
+ * either different from the 21142/3 or important for basic operation.
+ *
+ *
+ * CSR 6 (mask = 0xfe3bd1fd of bits not to change)
+ * -----
+ * Bit 24 - SCR
+ * Bit 23 - PCS
+ * Bit 22 - TTM (Transmit Threshold Mode)
+ * Bit 18 - Port Select
+ * Bit 13 - Start - 1, Stop - 0 Transmissions
+ * Bit 11:10 - Loop Back Operation Mode
+ * Bit 9 - Full Duplex mode (Advertise 10BaseT-FD if CSR14<7> is set)
+ * Bit 1 - Start - 1, Stop - 0 Receive
+ *
+ *
+ * CSR 14 (mask = 0xfff0ee39 of bits not to change)
+ * ------
+ * Bit 19 - PAUSE-Pause
+ * Bit 18 - Advertise T4
+ * Bit 17 - Advertise 100baseTx-FD
+ * Bit 16 - Advertise 100baseTx-HD
+ * Bit 12 - LTE - Link Test Enable
+ * Bit 7 - ANE - Auto Negotiate Enable
+ * Bit 6 - HDE - Advertise 10baseT-HD
+ * Bit 2 - Reset to Power down - kept as 1 for normal operation
+ * Bit 1 - Loop Back enable for 10baseT MCC
+ *
+ *
+ * CSR 12
+ * ------
+ * Bit 25 - Partner can do T4
+ * Bit 24 - Partner can do 100baseTx-FD
+ * Bit 23 - Partner can do 100baseTx-HD
+ * Bit 22 - Partner can do 10baseT-FD
+ * Bit 21 - Partner can do 10baseT-HD
+ * Bit 15 - LPN is 1 if all above bits are valid other wise 0
+ * Bit 14:12 - autonegotiation state (write 001 to start autonegotiate)
+ * Bit 3 - Autopolarity state
+ * Bit 2 - LS10B - link state of 10baseT 0 - good, 1 - failed
+ * Bit 1 - LS100B - link state of 100baseT 0 - good, 1 - failed
+ *
+ *
+ * Data Port Selection Info
+ *-------------------------
+ *
+ * CSR14<7> CSR6<18> CSR6<22> CSR6<23> CSR6<24> MODE/PORT
+ * 1 0 0 (X) 0 (X) 1 NWAY
+ * 0 0 1 0 (X) 0 10baseT
+ * 0 1 0 1 1 (X) 100baseT
+ *
+ *
+ */
+
+
+
+#include "tulip.h"
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+
+/* Periodic media timer for PNIC2 chips.  Link management is handled
+ * in pnic2_lnk_change(); here we only log the negotiation status at
+ * high debug levels and re-arm ourselves for one minute later. */
+void pnic2_timer(unsigned long data)
+{
+ struct net_device *netdev = (struct net_device *)data;
+ struct tulip_private *priv = (struct tulip_private *)netdev->priv;
+ long iobase = netdev->base_addr;
+
+ if (tulip_debug > 3)
+ printk(KERN_INFO"%s: PNIC2 negotiation status %8.8x.\n",
+ netdev->name,inl(iobase + CSR12));
+
+ /* Sample again in 60 seconds. */
+ mod_timer(&priv->timer, RUN_AT(60*HZ));
+}
+
+
+/* Kick off IEEE NWay autonegotiation on a PNIC2.  Programs the
+ * advertisement bits in CSR14 (and CSR6<9> for 10baseT-FD), selects
+ * NWay mode in CSR6 with Tx/Rx stopped, then writes the 001 pattern
+ * to CSR12<14:12> to start negotiation.  Completion is reported via
+ * pnic2_lnk_change().  The register write order below matters. */
+void pnic2_start_nway(struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int csr14;
+ int csr12;
+
+ /* set up what to advertise during the negotiation */
+
+ /* load in csr14 and mask off bits not to touch
+ * comment at top of file explains mask value
+ */
+ csr14 = (inl(ioaddr + CSR14) & 0xfff0ee39);
+
+ /* bit 17 - advertise 100baseTx-FD */
+ if (tp->sym_advertise & 0x0100) csr14 |= 0x00020000;
+
+ /* bit 16 - advertise 100baseTx-HD */
+ if (tp->sym_advertise & 0x0080) csr14 |= 0x00010000;
+
+ /* bit 6 - advertise 10baseT-HD */
+ if (tp->sym_advertise & 0x0020) csr14 |= 0x00000040;
+
+ /* Now set bit 12 Link Test Enable, Bit 7 Autonegotiation Enable
+ * and bit 0 Don't PowerDown 10baseT
+ */
+ csr14 |= 0x00001184;
+
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: Restarting PNIC2 autonegotiation, "
+ "csr14=%8.8x.\n", dev->name, csr14);
+
+ /* tell pnic2_lnk_change we are doing an nway negotiation */
+ dev->if_port = 0;
+ tp->nway = tp->mediasense = 1;
+ tp->nwayset = tp->lpar = 0;
+
+ /* now we have to set up csr6 for NWAY state */
+
+ tp->csr6 = inl(ioaddr + CSR6);
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: On Entry to Nway, "
+ "csr6=%8.8x.\n", dev->name, tp->csr6);
+
+ /* mask off any bits not to touch
+ * comment at top of file explains mask value
+ */
+ tp->csr6 = tp->csr6 & 0xfe3bd1fd;
+
+ /* don't forget that bit 9 is also used for advertising */
+ /* advertise 10baseT-FD for the negotiation (bit 9) */
+ if (tp->sym_advertise & 0x0040) tp->csr6 |= 0x00000200;
+
+ /* set bit 24 for nway negotiation mode ...
+ * see Data Port Selection comment at top of file
+ * and "Stop" - reset both Transmit (bit 13) and Receive (bit 1)
+ */
+ tp->csr6 |= 0x01000000;
+ outl(csr14, ioaddr + CSR14);
+ outl(tp->csr6, ioaddr + CSR6);
+ udelay(100);
+
+ /* all set up so now force the negotiation to begin */
+
+ /* read in current values and mask off all but the
+ * Autonegotiation bits 14:12. Writing a 001 to those bits
+ * should start the autonegotiation
+ */
+ csr12 = (inl(ioaddr + CSR12) & 0xffff8fff);
+ csr12 |= 0x1000;
+ outl(csr12, ioaddr + CSR12);
+}
+
+
+
+/* PNIC2 link-change handler, called from the interrupt path with the
+ * CSR5 status that triggered it.  Handles three situations in order:
+ * completion (or failure) of a pending NWay negotiation, a link-beat
+ * change on an established 100mb or 10mb port, and a catch-all
+ * fallback to 10baseT-HD.  See the register commentary at the top of
+ * this file for the CSR6/CSR12/CSR14 bit meanings used here. */
+void pnic2_lnk_change(struct net_device *dev, int csr5)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int csr14;
+
+ /* read the status register to find out what is up */
+ int csr12 = inl(ioaddr + CSR12);
+
+ if (tulip_debug > 1)
+ printk(KERN_INFO"%s: PNIC2 link status interrupt %8.8x, "
+ " CSR5 %x, %8.8x.\n", dev->name, csr12,
+ csr5, inl(ioaddr + CSR14));
+
+ /* If NWay finished and we have a negotiated partner capability.
+ * check bits 14:12 for bit pattern 101 - all is good
+ */
+ if (tp->nway && !tp->nwayset) {
+
+ /* we did an auto negotiation */
+
+ if ((csr12 & 0x7000) == 0x5000) {
+
+ /* negotiation ended successfully */
+
+ /* get the link partners reply and mask out all but
+ * bits 24-21 which show the partners capabilities
+ * and match those to what we advertised
+ *
+ * then begin to interpret the results of the negotiation.
+ * Always go in this order : (we are ignoring T4 for now)
+ * 100baseTx-FD, 100baseTx-HD, 10baseT-FD, 10baseT-HD
+ */
+
+ int negotiated = ((csr12 >> 16) & 0x01E0) & tp->sym_advertise;
+ tp->lpar = (csr12 >> 16);
+ tp->nwayset = 1;
+
+ /* if_port codes: 5=100TX-FD, 3=100TX-HD, 4=10T-FD, 0=10T-HD */
+ if (negotiated & 0x0100) dev->if_port = 5;
+ else if (negotiated & 0x0080) dev->if_port = 3;
+ else if (negotiated & 0x0040) dev->if_port = 4;
+ else if (negotiated & 0x0020) dev->if_port = 0;
+ else {
+ if (tulip_debug > 1)
+ printk(KERN_INFO "%s: funny autonegotiate result "
+ "csr12 %8.8x advertising %4.4x\n",
+ dev->name, csr12, tp->sym_advertise);
+ tp->nwayset = 0;
+ /* so check if 100baseTx link state is okay */
+ if ((csr12 & 2) == 0 && (tp->sym_advertise & 0x0180))
+ dev->if_port = 3;
+ }
+
+ /* now record the duplex that was negotiated */
+ tp->full_duplex = 0;
+ if ((dev->if_port == 4) || (dev->if_port == 5))
+ tp->full_duplex = 1;
+
+ if (tulip_debug > 1) {
+ if (tp->nwayset)
+ printk(KERN_INFO "%s: Switching to %s based on link "
+ "negotiation %4.4x & %4.4x = %4.4x.\n",
+ dev->name, medianame[dev->if_port],
+ tp->sym_advertise, tp->lpar, negotiated);
+ }
+
+ /* remember to turn off bit 7 - autonegotiate
+ * enable so we can properly end nway mode and
+ * set duplex (ie. use csr6<9> again)
+ */
+ csr14 = (inl(ioaddr + CSR14) & 0xffffff7f);
+ outl(csr14,ioaddr + CSR14);
+
+
+ /* now set the data port and operating mode
+ * (see the Data Port Selection comments at
+ * the top of the file
+ */
+
+ /* get current csr6 and mask off bits not to touch */
+ /* see comment at top of file */
+
+ tp->csr6 = (inl(ioaddr + CSR6) & 0xfe3bd1fd);
+
+ /* so if using if_port 3 or 5 then select the 100baseT
+ * port else select the 10baseT port.
+ * See the Data Port Selection table at the top
+ * of the file which was taken from the PNIC_II.PDF
+ * datasheet
+ */
+ if (dev->if_port & 1) tp->csr6 |= 0x01840000;
+ else tp->csr6 |= 0x00400000;
+
+ /* now set the full duplex bit appropriately */
+ if (tp->full_duplex) tp->csr6 |= 0x00000200;
+
+ outl(1, ioaddr + CSR13);
+
+ if (tulip_debug > 2)
+ printk(KERN_DEBUG "%s: Setting CSR6 %8.8x/%x CSR12 "
+ "%8.8x.\n", dev->name, tp->csr6,
+ inl(ioaddr + CSR6), inl(ioaddr + CSR12));
+
+ /* now the following actually writes out the
+ * new csr6 values
+ */
+ tulip_start_rxtx(tp);
+
+ return;
+
+ } else {
+ printk(KERN_INFO "%s: Autonegotiation failed, "
+ "using %s, link beat status %4.4x.\n",
+ dev->name, medianame[dev->if_port], csr12);
+
+ /* remember to turn off bit 7 - autonegotiate
+ * enable so we don't forget
+ */
+ csr14 = (inl(ioaddr + CSR14) & 0xffffff7f);
+ outl(csr14,ioaddr + CSR14);
+
+ /* what should we do when autonegotiate fails?
+ * should we try again or default to baseline
+ * case. I just don't know.
+ *
+ * for now default to some baseline case
+ */
+
+ dev->if_port = 0;
+ tp->nway = 0;
+ tp->nwayset = 1;
+
+ /* set to 10baseTx-HD - see Data Port Selection
+ * comment given at the top of the file
+ */
+ tp->csr6 = (inl(ioaddr + CSR6) & 0xfe3bd1fd);
+ tp->csr6 |= 0x00400000;
+
+ tulip_restart_rxtx(tp);
+
+ return;
+
+ }
+ }
+
+ /* Link failure after a completed negotiation: restart NWay. */
+ if ((tp->nwayset && (csr5 & 0x08000000)
+ && (dev->if_port == 3 || dev->if_port == 5)
+ && (csr12 & 2) == 2) || (tp->nway && (csr5 & (TPLnkFail)))) {
+
+ /* Link blew? Maybe restart NWay. */
+
+ if (tulip_debug > 2)
+ printk(KERN_DEBUG "%s: Ugh! Link blew?\n", dev->name);
+
+ del_timer_sync(&tp->timer);
+ pnic2_start_nway(dev);
+ tp->timer.expires = RUN_AT(3*HZ);
+ add_timer(&tp->timer);
+
+ return;
+ }
+
+
+ if (dev->if_port == 3 || dev->if_port == 5) {
+
+ /* we are at 100mb and a potential link change occurred */
+
+ if (tulip_debug > 1)
+ printk(KERN_INFO"%s: PNIC2 %s link beat %s.\n",
+ dev->name, medianame[dev->if_port],
+ (csr12 & 2) ? "failed" : "good");
+
+ /* check 100 link beat */
+
+ tp->nway = 0;
+ tp->nwayset = 1;
+
+ /* if failed then try doing an nway to get in sync */
+ if ((csr12 & 2) && ! tp->medialock) {
+ del_timer_sync(&tp->timer);
+ pnic2_start_nway(dev);
+ tp->timer.expires = RUN_AT(3*HZ);
+ add_timer(&tp->timer);
+ }
+
+ return;
+ }
+
+ if (dev->if_port == 0 || dev->if_port == 4) {
+
+ /* we are at 10mb and a potential link change occurred */
+
+ if (tulip_debug > 1)
+ printk(KERN_INFO"%s: PNIC2 %s link beat %s.\n",
+ dev->name, medianame[dev->if_port],
+ (csr12 & 4) ? "failed" : "good");
+
+
+ tp->nway = 0;
+ tp->nwayset = 1;
+
+ /* if failed, try doing an nway to get in sync */
+ if ((csr12 & 4) && ! tp->medialock) {
+ del_timer_sync(&tp->timer);
+ pnic2_start_nway(dev);
+ tp->timer.expires = RUN_AT(3*HZ);
+ add_timer(&tp->timer);
+ }
+
+ return;
+ }
+
+
+ if (tulip_debug > 1)
+ printk(KERN_INFO"%s: PNIC2 Link Change Default?\n",dev->name);
+
+ /* if all else fails default to trying 10baseT-HD */
+ dev->if_port = 0;
+
+ /* make sure autonegotiate enable is off */
+ csr14 = (inl(ioaddr + CSR14) & 0xffffff7f);
+ outl(csr14,ioaddr + CSR14);
+
+ /* set to 10baseTx-HD - see Data Port Selection
+ * comment given at the top of the file
+ */
+ tp->csr6 = (inl(ioaddr + CSR6) & 0xfe3bd1fd);
+ tp->csr6 |= 0x00400000;
+
+ tulip_restart_rxtx(tp);
+}
+
--- /dev/null
+/*
+ drivers/net/tulip/timer.c
+
+ Maintained by Jeff Garzik <jgarzik@pobox.com>
+ Copyright 2000,2001 The Linux Kernel Team
+ Written/copyright 1994-2001 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Please refer to Documentation/DocBook/tulip.{pdf,ps,html}
+ for more information on this driver, or visit the project
+ Web page at http://sourceforge.net/projects/tulip/
+
+*/
+
+#include "tulip.h"
+
+
+/* Generic media-selection timer, dispatched per chip type.  For the
+ * 21040/21041 it polls CSR12 link-beat bits and cycles through the
+ * fixed port list; for 21140/21142-class chips it walks the EEPROM
+ * media table (tp->mtable), checking the transceiver's link-sense bit
+ * and advancing to the next media leaf on failure.  Re-arms itself
+ * via mod_timer at a rate that depends on what it found. */
+void tulip_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ u32 csr12 = inl(ioaddr + CSR12);
+ int next_tick = 2*HZ;
+
+ if (tulip_debug > 2) {
+ printk(KERN_DEBUG "%s: Media selection tick, %s, status %8.8x mode"
+ " %8.8x SIA %8.8x %8.8x %8.8x %8.8x.\n",
+ dev->name, medianame[dev->if_port], inl(ioaddr + CSR5),
+ inl(ioaddr + CSR6), csr12, inl(ioaddr + CSR13),
+ inl(ioaddr + CSR14), inl(ioaddr + CSR15));
+ }
+ switch (tp->chip_id) {
+ case DC21040:
+ if (!tp->medialock && csr12 & 0x0002) { /* Network error */
+ printk(KERN_INFO "%s: No link beat found.\n",
+ dev->name);
+ /* Toggle between ports 0 and 2. */
+ dev->if_port = (dev->if_port == 2 ? 0 : 2);
+ tulip_select_media(dev, 0);
+ dev->trans_start = jiffies;
+ }
+ break;
+ case DC21041:
+ if (tulip_debug > 2)
+ printk(KERN_DEBUG "%s: 21041 media tick CSR12 %8.8x.\n",
+ dev->name, csr12);
+ if (tp->medialock) break;
+ switch (dev->if_port) {
+ case 0: case 3: case 4:
+ if (csr12 & 0x0004) { /*LnkFail */
+ /* 10baseT is dead. Check for activity on alternate port. */
+ tp->mediasense = 1;
+ if (csr12 & 0x0200)
+ dev->if_port = 2;
+ else
+ dev->if_port = 1;
+ printk(KERN_INFO "%s: No 21041 10baseT link beat, Media switched to %s.\n",
+ dev->name, medianame[dev->if_port]);
+ /* Reprogram the SIA: reset, then CSR14/15/13 in order. */
+ outl(0, ioaddr + CSR13); /* Reset */
+ outl(t21041_csr14[dev->if_port], ioaddr + CSR14);
+ outl(t21041_csr15[dev->if_port], ioaddr + CSR15);
+ outl(t21041_csr13[dev->if_port], ioaddr + CSR13);
+ next_tick = 10*HZ; /* 2.4 sec. */
+ } else
+ next_tick = 30*HZ;
+ break;
+ case 1: /* 10base2 */
+ case 2: /* AUI */
+ if (csr12 & 0x0100) {
+ next_tick = (30*HZ); /* 30 sec. */
+ tp->mediasense = 0;
+ } else if ((csr12 & 0x0004) == 0) {
+ printk(KERN_INFO "%s: 21041 media switched to 10baseT.\n",
+ dev->name);
+ dev->if_port = 0;
+ tulip_select_media(dev, 0);
+ next_tick = (24*HZ)/10; /* 2.4 sec. */
+ } else if (tp->mediasense || (csr12 & 0x0002)) {
+ dev->if_port = 3 - dev->if_port; /* Swap ports. */
+ tulip_select_media(dev, 0);
+ next_tick = 20*HZ;
+ } else {
+ next_tick = 20*HZ;
+ }
+ break;
+ }
+ break;
+ case DC21140:
+ case DC21142:
+ case MX98713:
+ case COMPEX9881:
+ case DM910X:
+ default: {
+ struct medialeaf *mleaf;
+ unsigned char *p;
+ if (tp->mtable == NULL) { /* No EEPROM info, use generic code. */
+ /* Not much that can be done.
+ Assume this a generic MII or SYM transceiver. */
+ next_tick = 60*HZ;
+ if (tulip_debug > 2)
+ printk(KERN_DEBUG "%s: network media monitor CSR6 %8.8x "
+ "CSR12 0x%2.2x.\n",
+ dev->name, inl(ioaddr + CSR6), csr12 & 0xff);
+ break;
+ }
+ mleaf = &tp->mtable->mleaf[tp->cur_index];
+ p = mleaf->leafdata;
+ switch (mleaf->type) {
+ case 0: case 4: {
+ /* Type 0 serial or 4 SYM transceiver. Check the link beat bit. */
+ int offset = mleaf->type == 4 ? 5 : 2;
+ s8 bitnum = p[offset];
+ if (p[offset+1] & 0x80) {
+ /* No media-sense bit available for this leaf. */
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG"%s: Transceiver monitor tick "
+ "CSR12=%#2.2x, no media sense.\n",
+ dev->name, csr12);
+ if (mleaf->type == 4) {
+ if (mleaf->media == 3 && (csr12 & 0x02))
+ goto select_next_media;
+ }
+ break;
+ }
+ if (tulip_debug > 2)
+ printk(KERN_DEBUG "%s: Transceiver monitor tick: CSR12=%#2.2x"
+ " bit %d is %d, expecting %d.\n",
+ dev->name, csr12, (bitnum >> 1) & 7,
+ (csr12 & (1 << ((bitnum >> 1) & 7))) != 0,
+ (bitnum >= 0));
+ /* Check that the specified bit has the proper value. */
+ if ((bitnum < 0) !=
+ ((csr12 & (1 << ((bitnum >> 1) & 7))) != 0)) {
+ if (tulip_debug > 2)
+ printk(KERN_DEBUG "%s: Link beat detected for %s.\n", dev->name,
+ medianame[mleaf->media & MEDIA_MASK]);
+ if ((p[2] & 0x61) == 0x01) /* Bogus Znyx board. */
+ goto actually_mii;
+ /* netif_carrier_on(dev); */
+ break;
+ }
+ /* netif_carrier_off(dev); */
+ if (tp->medialock)
+ break;
+ select_next_media:
+ if (--tp->cur_index < 0) {
+ /* We start again, but should instead look for default. */
+ tp->cur_index = tp->mtable->leafcount - 1;
+ }
+ dev->if_port = tp->mtable->mleaf[tp->cur_index].media;
+ if (tulip_media_cap[dev->if_port] & MediaIsFD)
+ goto select_next_media; /* Skip FD entries. */
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: No link beat on media %s,"
+ " trying transceiver type %s.\n",
+ dev->name, medianame[mleaf->media & MEDIA_MASK],
+ medianame[tp->mtable->mleaf[tp->cur_index].media]);
+ tulip_select_media(dev, 0);
+ /* Restart the transmit process. */
+ tulip_restart_rxtx(tp);
+ next_tick = (24*HZ)/10;
+ break;
+ }
+ case 1: case 3: /* 21140, 21142 MII */
+ actually_mii:
+ if (tulip_check_duplex(dev) < 0)
+ { /* netif_carrier_off(dev); */ }
+ else
+ { /* netif_carrier_on(dev); */ }
+ next_tick = 60*HZ;
+ break;
+ case 2: /* 21142 serial block has no link beat. */
+ default:
+ break;
+ }
+ }
+ break;
+ }
+ /* mod_timer synchronizes us with potential add_timer calls
+ * from interrupts.
+ */
+ mod_timer(&tp->timer, RUN_AT(next_tick));
+}
+
+
+/* Media timer for Macronix (MXIC) chips.  Nothing to manage here:
+ * log the negotiation status when debugging verbosely, then re-arm
+ * the timer for another sample in a minute. */
+void mxic_timer(unsigned long data)
+{
+ struct net_device *netdev = (struct net_device *)data;
+ struct tulip_private *priv = (struct tulip_private *)netdev->priv;
+ long iobase = netdev->base_addr;
+
+ if (tulip_debug > 3) {
+ printk(KERN_INFO"%s: MXIC negotiation status %8.8x.\n", netdev->name,
+ inl(iobase + CSR12));
+ }
+ /* Re-check in 60 seconds. */
+ mod_timer(&priv->timer, RUN_AT(60*HZ));
+}
+
+
+/* Media timer for Comet (ADMtek) chips.  Only logs link status and
+ * partner capability (registers 0xB8/0xC8 per the printk below --
+ * presumably the Comet's MII status windows; confirm against the
+ * datasheet) and re-arms the timer. */
+void comet_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 60*HZ;
+
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: Comet link status %4.4x partner capability "
+ "%4.4x.\n",
+ dev->name, inl(ioaddr + 0xB8), inl(ioaddr + 0xC8));
+ /* mod_timer synchronizes us with potential add_timer calls
+ * from interrupts.
+ */
+ mod_timer(&tp->timer, RUN_AT(next_tick));
+}
+
--- /dev/null
+/*
+ drivers/net/tulip/tulip.h
+
+ Copyright 2000,2001 The Linux Kernel Team
+ Written/copyright 1994-2001 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Please refer to Documentation/DocBook/tulip.{pdf,ps,html}
+ for more information on this driver, or visit the project
+ Web page at http://sourceforge.net/projects/tulip/
+
+*/
+
+#ifndef __NET_TULIP_H__
+#define __NET_TULIP_H__
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+
+
+/* undefine, or define to various debugging levels (>4 == obscene levels) */
+#define TULIP_DEBUG 1
+
+/* undefine USE_IO_OPS for MMIO, define for PIO */
+#ifdef CONFIG_TULIP_MMIO
+# undef USE_IO_OPS
+#else
+# define USE_IO_OPS 1
+#endif
+
+
+
+/* Per-chip-type attributes; the table itself (tulip_tbl[]) lives in
+ * tulip_core.c and is indexed by "enum chips" below. */
+struct tulip_chip_table {
+ char *chip_name;
+ int io_size;
+ int valid_intrs; /* CSR7 interrupt enable settings */
+ int flags; /* Bitwise OR of enum tbl_flag values. */
+ void (*media_timer) (unsigned long data);
+};
+
+
+/* Feature flags for tulip_chip_table.flags (single-bit values;
+ * note HAS_PNICNWAY/HAS_NWAY are listed out of numeric order). */
+enum tbl_flag {
+ HAS_MII = 0x0001,
+ HAS_MEDIA_TABLE = 0x0002,
+ CSR12_IN_SROM = 0x0004,
+ ALWAYS_CHECK_MII = 0x0008,
+ HAS_ACPI = 0x0010,
+ MC_HASH_ONLY = 0x0020, /* Hash-only multicast filter. */
+ HAS_PNICNWAY = 0x0080,
+ HAS_NWAY = 0x0040, /* Uses internal NWay xcvr. */
+ HAS_INTR_MITIGATION = 0x0100,
+ IS_ASIX = 0x0200,
+ HAS_8023X = 0x0400,
+ COMET_MAC_ADDR = 0x0800,
+ HAS_PCI_MWI = 0x1000,
+ HAS_PHY_IRQ = 0x2000,
+};
+
+
+/* chip types. careful! order is VERY IMPORTANT here, as these
+ * are used throughout the driver as indices into arrays */
+/* Note 21142 == 21143. */
+enum chips {
+ DC21040 = 0,
+ DC21041 = 1,
+ DC21140 = 2,
+ DC21142 = 3, DC21143 = 3,
+ LC82C168,
+ MX98713,
+ MX98715,
+ MX98725,
+ AX88140,
+ PNIC2,
+ COMET,
+ COMPEX9881,
+ I21145,
+ DM910X,
+ CONEXANT,
+};
+
+
+/* Capability bits for tulip_media_cap[] (indexed by media code). */
+enum MediaIs {
+ MediaIsFD = 1,
+ MediaAlwaysFD = 2,
+ MediaIsMII = 4,
+ MediaIsFx = 8,
+ MediaIs100 = 16
+};
+
+
+/* Offsets to the Command and Status Registers, "CSRs". All accesses
+ must be longword instructions and quadword aligned. */
+enum tulip_offsets {
+ CSR0 = 0,
+ CSR1 = 0x08,
+ CSR2 = 0x10,
+ CSR3 = 0x18,
+ CSR4 = 0x20,
+ CSR5 = 0x28,
+ CSR6 = 0x30,
+ CSR7 = 0x38,
+ CSR8 = 0x40,
+ CSR9 = 0x48,
+ CSR10 = 0x50,
+ CSR11 = 0x58,
+ CSR12 = 0x60,
+ CSR13 = 0x68,
+ CSR14 = 0x70,
+ CSR15 = 0x78,
+};
+
+/* register offset and bits for CFDD PCI config reg */
+enum pci_cfg_driver_reg {
+ CFDD = 0x40,
+ CFDD_Sleep = (1 << 31),
+ CFDD_Snooze = (1 << 30),
+};
+
+
+/* The bits in the CSR5 status registers, mostly interrupt sources. */
+enum status_bits {
+ TimerInt = 0x800,
+ SytemError = 0x2000, /* (sic) misspelling kept; renaming would break users of this name */
+ TPLnkFail = 0x1000,
+ TPLnkPass = 0x10,
+ NormalIntr = 0x10000,
+ AbnormalIntr = 0x8000,
+ RxJabber = 0x200,
+ RxDied = 0x100,
+ RxNoBuf = 0x80,
+ RxIntr = 0x40,
+ TxFIFOUnderflow = 0x20,
+ TxJabber = 0x08,
+ TxNoBuf = 0x04,
+ TxDied = 0x02,
+ TxIntr = 0x01,
+};
+
+
+/* Bits in the CSR6 operating-mode register. */
+enum tulip_mode_bits {
+ TxThreshold = (1 << 22),
+ FullDuplex = (1 << 9),
+ TxOn = 0x2000,
+ AcceptBroadcast = 0x0100,
+ AcceptAllMulticast = 0x0080,
+ AcceptAllPhys = 0x0040,
+ AcceptRunt = 0x0008,
+ RxOn = 0x0002,
+ RxTx = (TxOn | RxOn),
+};
+
+
+/* Bits in the CSR0 bus-configuration register. */
+enum tulip_busconfig_bits {
+ MWI = (1 << 24),
+ MRL = (1 << 23),
+ MRM = (1 << 21),
+ CALShift = 14,
+ BurstLenShift = 8,
+};
+
+
+/* The Tulip Rx and Tx buffer descriptors. */
+struct tulip_rx_desc {
+ s32 status;
+ s32 length;
+ u32 buffer1;
+ u32 buffer2;
+};
+
+
+struct tulip_tx_desc {
+ s32 status;
+ s32 length;
+ u32 buffer1;
+ u32 buffer2; /* We use only buffer 1. */
+};
+
+
+/* Bits in the descriptor status word. */
+enum desc_status_bits {
+ DescOwned = 0x80000000,
+ RxDescFatalErr = 0x8000,
+ RxWholePkt = 0x0300,
+};
+
+
+/* 21041 SIA connectivity register (CSR13) bits. */
+enum t21041_csr13_bits {
+ csr13_eng = (0xEF0<<4), /* for eng. purposes only, hardcode at EF0h */
+ csr13_aui = (1<<3), /* clear to force 10bT, set to force AUI/BNC */
+ csr13_cac = (1<<2), /* CSR13/14/15 autoconfiguration */
+ csr13_srl = (1<<0), /* When reset, resets all SIA functions, machines */
+
+ csr13_mask_auibnc = (csr13_eng | csr13_aui | csr13_srl),
+ csr13_mask_10bt = (csr13_eng | csr13_srl),
+};
+
+enum t21143_csr6_bits {
+ csr6_sc = (1<<31),
+ csr6_ra = (1<<30),
+ csr6_ign_dest_msb = (1<<26),
+ csr6_mbo = (1<<25),
+ csr6_scr = (1<<24), /* scramble mode flag: can't be set */
+ csr6_pcs = (1<<23), /* Enables PCS functions (symbol mode requires csr6_ps be set) default is set */
+ csr6_ttm = (1<<22), /* Transmit Threshold Mode, set for 10baseT, 0 for 100BaseTX */
+ csr6_sf = (1<<21), /* Store and forward. If set ignores TR bits */
+ csr6_hbd = (1<<19), /* Heart beat disable. Disables SQE function in 10baseT */
+ csr6_ps = (1<<18), /* Port Select. 0 (default) = 10baseT, 1 = 100baseTX: can't be set */
+ csr6_ca = (1<<17), /* Collision Offset Enable. If set uses special algorithm in low collision situations */
+ csr6_trh = (1<<15), /* Transmit Threshold high bit */
+ csr6_trl = (1<<14), /* Transmit Threshold low bit */
+
+ /***************************************************************
+ * This table shows transmit threshold values based on media *
+ * and these two registers (from PNIC1 & 2 docs) Note: this is *
+ * all meaningless if sf is set. *
+ ***************************************************************/
+
+ /***********************************
+ * (trh,trl) * 100BaseTX * 10BaseT *
+ ***********************************
+ * (0,0) * 128 * 72 *
+ * (0,1) * 256 * 96 *
+ * (1,0) * 512 * 128 *
+ * (1,1) * 1024 * 160 *
+ ***********************************/
+
+ csr6_fc = (1<<12), /* Forces a collision in next transmission (for testing in loopback mode) */
+ csr6_om_int_loop = (1<<10), /* internal (FIFO) loopback flag */
+ csr6_om_ext_loop = (1<<11), /* external (PMD) loopback flag */
+ /* set both and you get (PHY) loopback */
+ csr6_fd = (1<<9), /* Full duplex mode, disables heartbeat, no loopback */
+ csr6_pm = (1<<7), /* Pass All Multicast */
+ csr6_pr = (1<<6), /* Promiscuous mode */
+ csr6_sb = (1<<5), /* Start(1)/Stop(0) backoff counter */
+ csr6_if = (1<<4), /* Inverse Filtering, rejects only addresses in address table: can't be set */
+ csr6_pb = (1<<3), /* Pass Bad Frames, (1) causes even bad frames to be passed on */
+ csr6_ho = (1<<2), /* Hash-only filtering mode: can't be set */
+ csr6_hp = (1<<0), /* Hash/Perfect Receive Filtering Mode: can't be set */
+
+ csr6_mask_capture = (csr6_sc | csr6_ca),
+ csr6_mask_defstate = (csr6_mask_capture | csr6_mbo),
+ csr6_mask_hdcap = (csr6_mask_defstate | csr6_hbd | csr6_ps),
+ csr6_mask_hdcaptt = (csr6_mask_hdcap | csr6_trh | csr6_trl),
+ csr6_mask_fullcap = (csr6_mask_hdcaptt | csr6_fd),
+ csr6_mask_fullpromisc = (csr6_pr | csr6_pm),
+ csr6_mask_filters = (csr6_hp | csr6_ho | csr6_if),
+ csr6_mask_100bt = (csr6_scr | csr6_pcs | csr6_hbd),
+};
+
+
+/* Keep the ring sizes a power of two for efficiency.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority.
+ There are no ill effects from too-large receive rings. */
+#undef TX_RING_SIZE
+#undef RX_RING_SIZE
+#define TX_RING_SIZE 16
+#define RX_RING_SIZE 32
+
+/* Mask applied to medialeaf.media to strip flag bits. */
+#define MEDIA_MASK 31
+
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */
+
+#define TULIP_MIN_CACHE_LINE 8 /* in units of 32-bit words */
+
+#if defined(__sparc__) || defined(__hppa__)
+/* The UltraSparc PCI controllers will disconnect at every 64-byte
+ * crossing anyways so it makes no sense to tell Tulip to burst
+ * any more than that.
+ */
+#define TULIP_MAX_CACHE_LINE 16 /* in units of 32-bit words */
+#else
+#define TULIP_MAX_CACHE_LINE 32 /* in units of 32-bit words */
+#endif
+
+
+/* Ring-wrap flag in length field, use for last ring entry.
+ 0x01000000 means chain on buffer2 address,
+ 0x02000000 means use the ring start address in CSR2/3.
+ Note: Some work-alike chips do not function correctly in chained mode.
+ The ASIX chip works only in chained mode.
+ Thus we indicate ring mode, but always write the 'next' field for
+ chained mode as well.
+*/
+#define DESC_RING_WRAP 0x02000000
+
+
+#define EEPROM_SIZE 128 /* 2 << EEPROM_ADDRLEN */
+
+
+/* Convert a relative delay into an absolute jiffies deadline. */
+#define RUN_AT(x) (jiffies + (x))
+
+/* Unaligned little-endian 16-bit load; x86 can read it directly. */
+#if defined(__i386__) /* AKA get_unaligned() */
+#define get_u16(ptr) (*(u16 *)(ptr))
+#else
+#define get_u16(ptr) (((u8*)(ptr))[0] + (((u8*)(ptr))[1]<<8))
+#endif
+
+/* One media entry parsed from the SROM/EEPROM media table. */
+struct medialeaf {
+ u8 type;
+ u8 media;
+ unsigned char *leafdata;
+};
+
+
+/* Parsed EEPROM media table; mleaf[] is a flexible trailing array
+ * of leafcount entries. */
+struct mediatable {
+ u16 defaultmedia;
+ u8 leafcount;
+ u8 csr12dir; /* General purpose pin directions. */
+ unsigned has_mii:1;
+ unsigned has_nonmii:1;
+ unsigned has_reset:6;
+ u32 csr15dir;
+ u32 csr15val; /* 21143 NWay setting. */
+ struct medialeaf mleaf[0];
+};
+
+
+struct mediainfo {
+ struct mediainfo *next;
+ int info_type;
+ int index;
+ unsigned char *info;
+};
+
+/* Per-ring-slot bookkeeping: the skb and its DMA mapping. */
+struct ring_info {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+};
+
+
+/* Driver-private state hung off net_device.priv. */
+struct tulip_private {
+ const char *product_name;
+ struct net_device *next_module;
+ struct tulip_rx_desc *rx_ring;
+ struct tulip_tx_desc *tx_ring;
+ dma_addr_t rx_ring_dma;
+ dma_addr_t tx_ring_dma;
+ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+ struct ring_info tx_buffers[TX_RING_SIZE];
+ /* The addresses of receive-in-place skbuffs. */
+ struct ring_info rx_buffers[RX_RING_SIZE];
+ u16 setup_frame[96]; /* Pseudo-Tx frame to init address table. */
+ int chip_id; /* Index into tulip_tbl[] (enum chips). */
+ int revision;
+ int flags;
+ struct net_device_stats stats;
+ struct timer_list timer; /* Media selection timer. */
+ u32 mc_filter[2];
+ spinlock_t lock;
+ spinlock_t mii_lock;
+ unsigned int cur_rx, cur_tx; /* The next free ring entry */
+ unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
+
+#ifdef CONFIG_NET_HW_FLOWCONTROL
+#define RX_A_NBF_STOP 0xffffff3f /* To disable RX and RX-NOBUF ints. */
+ int fc_bit;
+ int mit_sel;
+ int mit_change; /* Signal for Interrupt Mitigation */
+#endif
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int full_duplex_lock:1;
+ unsigned int fake_addr:1; /* Multiport board faked address. */
+ unsigned int default_port:4; /* Last dev->if_port value. */
+ unsigned int media2:4; /* Secondary monitored media port. */
+ unsigned int medialock:1; /* Don't sense media type. */
+ unsigned int mediasense:1; /* Media sensing in progress. */
+ unsigned int nway:1, nwayset:1; /* 21143 internal NWay. */
+ unsigned int csr0; /* CSR0 setting. */
+ unsigned int csr6; /* Current CSR6 control settings. */
+ unsigned char eeprom[EEPROM_SIZE]; /* Serial EEPROM contents. */
+ void (*link_change) (struct net_device * dev, int csr5);
+ u16 sym_advertise, mii_advertise; /* NWay capabilities advertised. */
+ u16 lpar; /* 21143 Link partner ability. */
+ u16 advertising[4];
+ signed char phys[4], mii_cnt; /* MII device addresses. */
+ struct mediatable *mtable;
+ int cur_index; /* Current media index. */
+ int saved_if_port;
+ struct pci_dev *pdev;
+ int ttimer;
+ int susp_rx;
+ unsigned long nir;
+ unsigned long base_addr;
+ int csr12_shadow;
+ int pad0; /* Used for 8-byte alignment */
+};
+
+
+/* EEPROM patch entry keyed on board name and first 3 MAC bytes. */
+struct eeprom_fixup {
+ char *name;
+ unsigned char addr0;
+ unsigned char addr1;
+ unsigned char addr2;
+ u16 newtable[32]; /* Max length below. */
+};
+
+
+/* 21142.c */
+extern u16 t21142_csr14[];
+void t21142_timer(unsigned long data);
+void t21142_start_nway(struct net_device *dev);
+void t21142_lnk_change(struct net_device *dev, int csr5);
+
+
+/* pnic2.c */
+void pnic2_lnk_change(struct net_device *dev, int csr5);
+void pnic2_timer(unsigned long data);
+void pnic2_start_nway(struct net_device *dev);
+
+/* eeprom.c */
+void tulip_parse_eeprom(struct net_device *dev);
+int tulip_read_eeprom(long ioaddr, int location, int addr_len);
+
+/* interrupt.c */
+extern unsigned int tulip_max_interrupt_work;
+extern int tulip_rx_copybreak;
+void tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+int tulip_refill_rx(struct net_device *dev);
+
+/* media.c */
+int tulip_mdio_read(struct net_device *dev, int phy_id, int location);
+void tulip_mdio_write(struct net_device *dev, int phy_id, int location, int value);
+void tulip_select_media(struct net_device *dev, int startup);
+int tulip_check_duplex(struct net_device *dev);
+void tulip_find_mii (struct net_device *dev, int board_idx);
+
+/* pnic.c */
+void pnic_do_nway(struct net_device *dev);
+void pnic_lnk_change(struct net_device *dev, int csr5);
+void pnic_timer(unsigned long data);
+
+/* timer.c */
+void tulip_timer(unsigned long data);
+void mxic_timer(unsigned long data);
+void comet_timer(unsigned long data);
+
+/* tulip_core.c */
+extern int tulip_debug;
+extern const char * const medianame[];
+extern const char tulip_media_cap[];
+extern struct tulip_chip_table tulip_tbl[];
+extern u8 t21040_csr13[];
+extern u16 t21041_csr13[];
+extern u16 t21041_csr14[];
+extern u16 t21041_csr15[];
+
+#ifndef USE_IO_OPS
+/* Memory-mapped I/O build: alias the port accessors (inb/outb family)
+ onto the MMIO readb/writeb family so the rest of the driver can use
+ a single accessor style regardless of USE_IO_OPS. */
+#undef inb
+#undef inw
+#undef inl
+#undef outb
+#undef outw
+#undef outl
+#define inb(addr) readb((void*)(addr))
+#define inw(addr) readw((void*)(addr))
+#define inl(addr) readl((void*)(addr))
+#define outb(val,addr) writeb((val), (void*)(addr))
+#define outw(val,addr) writew((val), (void*)(addr))
+#define outl(val,addr) writel((val), (void*)(addr))
+#endif /* !USE_IO_OPS */
+
+
+
+/* Set the Rx and Tx start bits in CSR6, then read CSR6 back so the
+ posted MMIO write is flushed before the caller proceeds. */
+static inline void tulip_start_rxtx(struct tulip_private *tp)
+{
+ long ioaddr = tp->base_addr;
+ outl(tp->csr6 | RxTx, ioaddr + CSR6);
+ barrier();
+ (void) inl(ioaddr + CSR6); /* mmio sync */
+}
+
+/* Clear the Rx/Tx start bits in CSR6 if either engine is running; the
+ read-back flushes the posted write. No-op when already stopped. */
+static inline void tulip_stop_rxtx(struct tulip_private *tp)
+{
+ long ioaddr = tp->base_addr;
+ u32 csr6 = inl(ioaddr + CSR6);
+
+ if (csr6 & RxTx) {
+ outl(csr6 & ~RxTx, ioaddr + CSR6);
+ barrier();
+ (void) inl(ioaddr + CSR6); /* mmio sync */
+ }
+}
+
+/* Stop both DMA engines, give the chip 5us to settle, then restart. */
+static inline void tulip_restart_rxtx(struct tulip_private *tp)
+{
+ tulip_stop_rxtx(tp);
+ udelay(5);
+ tulip_start_rxtx(tp);
+}
+
+#endif /* __NET_TULIP_H__ */
--- /dev/null
+/* tulip_core.c: A DEC 21x4x-family ethernet driver for Linux. */
+
+/*
+ Maintained by Jeff Garzik <jgarzik@pobox.com>
+ Copyright 2000-2002 The Linux Kernel Team
+ Written/copyright 1994-2001 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Please refer to Documentation/DocBook/tulip.{pdf,ps,html}
+ for more information on this driver, or visit the project
+ Web page at http://sourceforge.net/projects/tulip/
+
+*/
+
+#define DRV_NAME "tulip"
+#define DRV_VERSION "0.9.15-pre12"
+#define DRV_RELDATE "Aug 9, 2002"
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include "tulip.h"
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/crc32.h>
+#include <asm/unaligned.h>
+#include <asm/uaccess.h>
+
+#ifdef __sparc__
+#include <asm/pbm.h>
+#endif
+
+static char version[] __devinitdata =
+ "Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";
+
+
+/* A few user-configurable values. */
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static unsigned int max_interrupt_work = 25;
+
+/* Number of boards the per-unit option arrays below can describe. */
+#define MAX_UNITS 8
+/* Used to pass the full-duplex flag, etc. */
+static int full_duplex[MAX_UNITS];
+static int options[MAX_UNITS];
+static int mtu[MAX_UNITS]; /* Jumbo MTU for interfaces. */
+
+/* The possible media types that can be set in options[] are: */
+/* The possible media types that can be set in options[] are: */
+/* (indexed by the media code that the driver keeps in dev->if_port) */
+const char * const medianame[32] = {
+ "10baseT", "10base2", "AUI", "100baseTx",
+ "10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx",
+ "100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII",
+ "10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4",
+ "MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19",
+ "","","","", "","","","", "","","","Transceiver reset",
+};
+
+/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
+/* On DMA-coherency-restricted or alignment-sensitive architectures we
+ copy every packet (1518 >= max frame), elsewhere only small ones.
+ NOTE: was "__sparc_" (single trailing underscore), which no compiler
+ predefines, so SPARC silently fell into the 100-byte branch. */
+#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
+ || defined(__sparc__) || defined(__ia64__) \
+ || defined(__sh__) || defined(__mips__)
+static int rx_copybreak = 1518;
+#else
+static int rx_copybreak = 100;
+#endif
+
+/*
+ Set the bus performance register.
+ Typical: Set 16 longword cache alignment, no burst limit.
+ Cache alignment bits 15:14 Burst length 13:8
+ 0000 No alignment 0x00000000 unlimited 0800 8 longwords
+ 4000 8 longwords 0100 1 longword 1000 16 longwords
+ 8000 16 longwords 0200 2 longwords 2000 32 longwords
+ C000 32 longwords 0400 4 longwords
+ Warning: many older 486 systems are broken and require setting 0x00A04800
+ 8 longword cache alignment, 8 longword burst.
+ ToDo: Non-Intel setting could be better.
+*/
+
+/* Default CSR0 (bus mode) value per architecture: base PCI settings in
+ the high bits plus the cache-alignment/burst bits described in the
+ table above. Overridable via the 'csr0' module parameter. */
+#if defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
+static int csr0 = 0x01A00000 | 0xE000;
+#elif defined(__i386__) || defined(__powerpc__)
+static int csr0 = 0x01A00000 | 0x8000;
+#elif defined(__sparc__) || defined(__hppa__)
+/* The UltraSparc PCI controllers will disconnect at every 64-byte
+ * crossing anyways so it makes no sense to tell Tulip to burst
+ * any more than that.
+ */
+static int csr0 = 0x01A00000 | 0x9000;
+#elif defined(__arm__) || defined(__sh__)
+static int csr0 = 0x01A00000 | 0x4800;
+#elif defined(__mips__)
+static int csr0 = 0x00200000 | 0x4000;
+#else
+#warning Processor architecture undefined!
+static int csr0 = 0x00A00000 | 0x4800;
+#endif
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (4*HZ)
+
+
+MODULE_AUTHOR("The Linux Kernel Team");
+MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(tulip_debug, "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(csr0, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+
+#define PFX DRV_NAME ": "
+
+/* Debug verbosity; larger values enable progressively more printk
+ output (guarded below by checks of the form 'tulip_debug > n'). */
+#ifdef TULIP_DEBUG
+int tulip_debug = TULIP_DEBUG;
+#else
+int tulip_debug = 1;
+#endif
+
+
+
+/*
+ * This table use during operation for capabilities and media timer.
+ *
+ * It is indexed via the values in 'enum chips'
+ */
+
+/* Entry order must match 'enum chips'. The third field is the mask of
+ valid interrupt bits written to CSR5/CSR7 at open time; the last
+ field is the media-monitor timer routine installed for the chip
+ (see tulip_down, which re-arms tulip_tbl[chip_id].media_timer). */
+struct tulip_chip_table tulip_tbl[] = {
+ /* DC21040 */
+ { "Digital DC21040 Tulip", 128, 0x0001ebef, 0, tulip_timer },
+
+ /* DC21041 */
+ { "Digital DC21041 Tulip", 128, 0x0001ebef,
+ HAS_MEDIA_TABLE | HAS_NWAY, tulip_timer },
+
+ /* DC21140 */
+ { "Digital DS21140 Tulip", 128, 0x0001ebef,
+ HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer },
+
+ /* DC21142, DC21143 */
+ { "Digital DS21143 Tulip", 128, 0x0801fbff,
+ HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY
+ | HAS_INTR_MITIGATION | HAS_PCI_MWI, t21142_timer },
+
+ /* LC82C168 */
+ { "Lite-On 82c168 PNIC", 256, 0x0001fbef,
+ HAS_MII | HAS_PNICNWAY, pnic_timer },
+
+ /* MX98713 */
+ { "Macronix 98713 PMAC", 128, 0x0001ebef,
+ HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer },
+
+ /* MX98715 */
+ { "Macronix 98715 PMAC", 256, 0x0001ebef,
+ HAS_MEDIA_TABLE, mxic_timer },
+
+ /* MX98725 */
+ { "Macronix 98725 PMAC", 256, 0x0001ebef,
+ HAS_MEDIA_TABLE, mxic_timer },
+
+ /* AX88140 */
+ { "ASIX AX88140", 128, 0x0001fbff,
+ HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY
+ | IS_ASIX, tulip_timer },
+
+ /* PNIC2 */
+ { "Lite-On PNIC-II", 256, 0x0801fbff,
+ HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer },
+
+ /* COMET */
+ { "ADMtek Comet", 256, 0x0001abef,
+ MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer },
+
+ /* COMPEX9881 */
+ { "Compex 9881 PMAC", 128, 0x0001ebef,
+ HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer },
+
+ /* I21145 */
+ { "Intel DS21145 Tulip", 128, 0x0801fbff,
+ HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI
+ | HAS_NWAY | HAS_PCI_MWI, t21142_timer },
+
+ /* DM910X */
+ { "Davicom DM9102/DM9102A", 128, 0x0001ebef,
+ HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
+ tulip_timer },
+
+ /* CONEXANT */
+ { "Conexant LANfinity", 256, 0x0001ebef,
+ HAS_MII, tulip_timer },
+};
+
+
+/* PCI vendor/device IDs this driver claims; the final (driver_data)
+ field is the chip index (enum chips) used to look up tulip_tbl[]. */
+static struct pci_device_id tulip_pci_tbl[] __devinitdata = {
+ { 0x1011, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21040 },
+ { 0x1011, 0x0014, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21041 },
+ { 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
+ { 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
+ { 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
+ { 0x10d9, 0x0512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98713 },
+ { 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
+/* { 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98725 },*/
+ { 0x125B, 0x1400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AX88140 },
+ { 0x11AD, 0xc115, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PNIC2 },
+ { 0x1317, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x1317, 0x0985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x1317, 0x1985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x1317, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x13D1, 0xAB02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x13D1, 0xAB03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x13D1, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x104A, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x104A, 0x2774, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
+ { 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
+ { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
+ { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
+ { 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
+ { 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x1186, 0x1561, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x1626, 0x8410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x14f1, 0x1803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CONEXANT },
+ { } /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, tulip_pci_tbl);
+
+
+/* A full-duplex map for media types. */
+/* A full-duplex map for media types. */
+/* Indexed by the dev->if_port media code; the bits are tested against
+ the MediaIsMII/MediaIs100/MediaIsFD/MediaAlwaysFD masks below. */
+const char tulip_media_cap[32] =
+{0,0,0,16, 3,19,16,24, 27,4,7,5, 0,20,23,20, 28,31,0,0, };
+/* 21040 SIA (CSR13) settings, indexed by media code. */
+u8 t21040_csr13[] = {2,0x0C,8,4, 4,0,0,0, 0,0,0,0, 4,0,0,0};
+
+/* 21041 transceiver register settings: 10-T, 10-2, AUI, 10-T, 10T-FD*/
+u16 t21041_csr13[] = {
+ csr13_mask_10bt, /* 10-T */
+ csr13_mask_auibnc, /* 10-2 */
+ csr13_mask_auibnc, /* AUI */
+ csr13_mask_10bt, /* 10-T */
+ csr13_mask_10bt, /* 10T-FD */
+};
+u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
+u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
+
+
+static void tulip_tx_timeout(struct net_device *dev);
+static void tulip_init_ring(struct net_device *dev);
+static int tulip_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static int tulip_open(struct net_device *dev);
+static int tulip_close(struct net_device *dev);
+static void tulip_up(struct net_device *dev);
+static void tulip_down(struct net_device *dev);
+static struct net_device_stats *tulip_get_stats(struct net_device *dev);
+static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void set_rx_mode(struct net_device *dev);
+
+
+
+/* Set the chip's ACPI power state through the CFDD PCI configuration
+ register (only on chips flagged HAS_ACPI). 'sleep' takes precedence
+ over 'snooze'; with both zero the chip is fully woken. The register
+ is rewritten only when the requested state differs. */
+static void tulip_set_power_state (struct tulip_private *tp,
+ int sleep, int snooze)
+{
+ if (tp->flags & HAS_ACPI) {
+ u32 tmp, newtmp;
+ pci_read_config_dword (tp->pdev, CFDD, &tmp);
+ newtmp = tmp & ~(CFDD_Sleep | CFDD_Snooze);
+ if (sleep)
+ newtmp |= CFDD_Sleep;
+ else if (snooze)
+ newtmp |= CFDD_Snooze;
+ if (tmp != newtmp)
+ pci_write_config_dword (tp->pdev, CFDD, newtmp);
+ }
+
+}
+
+
+/* Bring the interface up: wake the chip from ACPI sleep, reset it,
+ program the descriptor ring bases and station address, pick a media
+ type (user-forced, EEPROM default, or per-chip autonegotiation),
+ then enable interrupts, start the Rx/Tx engines and arm the media
+ timer. Called with the device quiescent (open / after timeout). */
+static void tulip_up(struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 3*HZ;
+ int i;
+
+ /* Wake the chip from sleep/snooze mode. */
+ tulip_set_power_state (tp, 0, 0);
+
+ /* On some chip revs we must set the MII/SYM port before the reset!? */
+ if (tp->mii_cnt || (tp->mtable && tp->mtable->has_mii))
+ outl(0x00040000, ioaddr + CSR6);
+
+ /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
+ outl(0x00000001, ioaddr + CSR0);
+ udelay(100);
+
+ /* Deassert reset.
+ Wait the specified 50 PCI cycles after a reset by initializing
+ Tx and Rx queues and the address filter list. */
+ outl(tp->csr0, ioaddr + CSR0);
+ udelay(100);
+
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: tulip_up(), irq==%d.\n", dev->name, dev->irq);
+
+ outl(tp->rx_ring_dma, ioaddr + CSR3);
+ outl(tp->tx_ring_dma, ioaddr + CSR4);
+ tp->cur_rx = tp->cur_tx = 0;
+ tp->dirty_rx = tp->dirty_tx = 0;
+
+ /* Program the station address: hash-filter chips take it directly
+ via CSRs/registers, others via a setup frame queued on the Tx
+ ring (transmitted when Tx starts below). */
+ if (tp->flags & MC_HASH_ONLY) {
+ u32 addr_low = cpu_to_le32(get_unaligned((u32 *)dev->dev_addr));
+ u32 addr_high = cpu_to_le32(get_unaligned((u16 *)(dev->dev_addr+4)));
+ if (tp->chip_id == AX88140) {
+ outl(0, ioaddr + CSR13);
+ outl(addr_low, ioaddr + CSR14);
+ outl(1, ioaddr + CSR13);
+ outl(addr_high, ioaddr + CSR14);
+ } else if (tp->flags & COMET_MAC_ADDR) {
+ outl(addr_low, ioaddr + 0xA4);
+ outl(addr_high, ioaddr + 0xA8);
+ outl(0, ioaddr + 0xAC);
+ outl(0, ioaddr + 0xB0);
+ }
+ } else {
+ /* This is set_rx_mode(), but without starting the transmitter. */
+ u16 *eaddrs = (u16 *)dev->dev_addr;
+ u16 *setup_frm = &tp->setup_frame[15*6];
+ dma_addr_t mapping;
+
+ /* 21140 bug: you must add the broadcast address. */
+ memset(tp->setup_frame, 0xff, sizeof(tp->setup_frame));
+ /* Fill the final entry of the table with our physical address. */
+ *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
+ *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
+ *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
+
+ mapping = pci_map_single(tp->pdev, tp->setup_frame,
+ sizeof(tp->setup_frame),
+ PCI_DMA_TODEVICE);
+ tp->tx_buffers[tp->cur_tx].skb = NULL;
+ tp->tx_buffers[tp->cur_tx].mapping = mapping;
+
+ /* Put the setup frame on the Tx list. */
+ tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192);
+ tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping);
+ tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned);
+
+ tp->cur_tx++;
+ }
+
+ tp->saved_if_port = dev->if_port;
+ if (dev->if_port == 0)
+ dev->if_port = tp->default_port;
+
+ /* Allow selecting a default media. */
+ i = 0;
+ if (tp->mtable == NULL)
+ goto media_picked;
+ if (dev->if_port) {
+ int looking_for = tulip_media_cap[dev->if_port] & MediaIsMII ? 11 :
+ (dev->if_port == 12 ? 0 : dev->if_port);
+ for (i = 0; i < tp->mtable->leafcount; i++)
+ if (tp->mtable->mleaf[i].media == looking_for) {
+ printk(KERN_INFO "%s: Using user-specified media %s.\n",
+ dev->name, medianame[dev->if_port]);
+ goto media_picked;
+ }
+ }
+ if ((tp->mtable->defaultmedia & 0x0800) == 0) {
+ int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
+ for (i = 0; i < tp->mtable->leafcount; i++)
+ if (tp->mtable->mleaf[i].media == looking_for) {
+ printk(KERN_INFO "%s: Using EEPROM-set media %s.\n",
+ dev->name, medianame[looking_for]);
+ goto media_picked;
+ }
+ }
+ /* Start sensing first non-full-duplex media. */
+ for (i = tp->mtable->leafcount - 1;
+ (tulip_media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0; i--)
+ ;
+media_picked:
+
+ tp->csr6 = 0;
+ tp->cur_index = i;
+ tp->nwayset = 0;
+
+ /* Per-chip media setup / start of autonegotiation. */
+ if (dev->if_port) {
+ if (tp->chip_id == DC21143 &&
+ (tulip_media_cap[dev->if_port] & MediaIsMII)) {
+ /* We must reset the media CSRs when we force-select MII mode. */
+ outl(0x0000, ioaddr + CSR13);
+ outl(0x0000, ioaddr + CSR14);
+ outl(0x0008, ioaddr + CSR15);
+ }
+ tulip_select_media(dev, 1);
+ } else if (tp->chip_id == DC21041) {
+ dev->if_port = 0;
+ tp->nway = tp->mediasense = 1;
+ tp->nwayset = tp->lpar = 0;
+ outl(0x00000000, ioaddr + CSR13);
+ outl(0xFFFFFFFF, ioaddr + CSR14);
+ outl(0x00000008, ioaddr + CSR15); /* Listen on AUI also. */
+ tp->csr6 = 0x80020000;
+ if (tp->sym_advertise & 0x0040)
+ tp->csr6 |= FullDuplex;
+ outl(tp->csr6, ioaddr + CSR6);
+ outl(0x0000EF01, ioaddr + CSR13);
+
+ } else if (tp->chip_id == DC21142) {
+ if (tp->mii_cnt) {
+ tulip_select_media(dev, 1);
+ if (tulip_debug > 1)
+ printk(KERN_INFO "%s: Using MII transceiver %d, status "
+ "%4.4x.\n",
+ dev->name, tp->phys[0], tulip_mdio_read(dev, tp->phys[0], 1));
+ outl(csr6_mask_defstate, ioaddr + CSR6);
+ tp->csr6 = csr6_mask_hdcap;
+ dev->if_port = 11;
+ outl(0x0000, ioaddr + CSR13);
+ outl(0x0000, ioaddr + CSR14);
+ } else
+ t21142_start_nway(dev);
+ } else if (tp->chip_id == PNIC2) {
+ /* for initial startup advertise 10/100 Full and Half */
+ tp->sym_advertise = 0x01E0;
+ /* enable autonegotiate end interrupt */
+ outl(inl(ioaddr+CSR5)| 0x00008010, ioaddr + CSR5);
+ outl(inl(ioaddr+CSR7)| 0x00008010, ioaddr + CSR7);
+ pnic2_start_nway(dev);
+ } else if (tp->chip_id == LC82C168 && ! tp->medialock) {
+ if (tp->mii_cnt) {
+ dev->if_port = 11;
+ tp->csr6 = 0x814C0000 | (tp->full_duplex ? 0x0200 : 0);
+ outl(0x0001, ioaddr + CSR15);
+ } else if (inl(ioaddr + CSR5) & TPLnkPass)
+ pnic_do_nway(dev);
+ else {
+ /* Start with 10mbps to do autonegotiation. */
+ outl(0x32, ioaddr + CSR12);
+ tp->csr6 = 0x00420000;
+ outl(0x0001B078, ioaddr + 0xB8);
+ outl(0x0201B078, ioaddr + 0xB8);
+ next_tick = 1*HZ;
+ }
+ } else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881)
+ && ! tp->medialock) {
+ dev->if_port = 0;
+ tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0);
+ outl(0x0f370000 | inw(ioaddr + 0x80), ioaddr + 0x80);
+ } else if (tp->chip_id == MX98715 || tp->chip_id == MX98725) {
+ /* Provided by BOLO, Macronix - 12/10/1998. */
+ dev->if_port = 0;
+ tp->csr6 = 0x01a80200;
+ outl(0x0f370000 | inw(ioaddr + 0x80), ioaddr + 0x80);
+ outl(0x11000 | inw(ioaddr + 0xa0), ioaddr + 0xa0);
+ } else if (tp->chip_id == COMET || tp->chip_id == CONEXANT) {
+ /* Enable automatic Tx underrun recovery. */
+ outl(inl(ioaddr + 0x88) | 1, ioaddr + 0x88);
+ dev->if_port = tp->mii_cnt ? 11 : 0;
+ tp->csr6 = 0x00040000;
+ } else if (tp->chip_id == AX88140) {
+ tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100;
+ } else
+ tulip_select_media(dev, 1);
+
+ /* Start the chip's Tx to process setup frame. */
+ tulip_stop_rxtx(tp);
+ barrier();
+ udelay(5);
+ outl(tp->csr6 | TxOn, ioaddr + CSR6);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
+ outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
+ tulip_start_rxtx(tp);
+ outl(0, ioaddr + CSR2); /* Rx poll demand */
+
+ if (tulip_debug > 2) {
+ printk(KERN_DEBUG "%s: Done tulip_up(), CSR0 %8.8x, CSR5 %8.8x CSR6 %8.8x.\n",
+ dev->name, inl(ioaddr + CSR0), inl(ioaddr + CSR5),
+ inl(ioaddr + CSR6));
+ }
+
+ /* Set the timer to switch to check for link beat and perhaps switch
+ to an alternate media type. */
+ tp->timer.expires = RUN_AT(next_tick);
+ add_timer(&tp->timer);
+}
+
+#ifdef CONFIG_NET_HW_FLOWCONTROL
+/* Enable receiver */
+/* Flow-control callback: clear our XOFF bit and, if the interface is
+ running, refill the Rx ring and re-enable the chip's interrupts. */
+void tulip_xon(struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+
+ clear_bit(tp->fc_bit, &netdev_fc_xoff);
+ if (netif_running(dev)){
+
+ tulip_refill_rx(dev);
+ outl(tulip_tbl[tp->chip_id].valid_intrs, dev->base_addr+CSR7);
+ }
+}
+#endif
+
+/* dev->open handler: pin the module, claim the (shared) interrupt
+ line, initialize the descriptor rings, bring the hardware up, and
+ start the transmit queue. Returns request_irq()'s error code on
+ failure (with the use count dropped again). */
+static int
+tulip_open(struct net_device *dev)
+{
+#ifdef CONFIG_NET_HW_FLOWCONTROL
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+#endif
+ int retval;
+ MOD_INC_USE_COUNT;
+
+ if ((retval = request_irq(dev->irq, &tulip_interrupt, SA_SHIRQ, dev->name, dev))) {
+ MOD_DEC_USE_COUNT;
+ return retval;
+ }
+
+ tulip_init_ring (dev);
+
+ tulip_up (dev);
+
+#ifdef CONFIG_NET_HW_FLOWCONTROL
+ tp->fc_bit = netdev_register_fc(dev, tulip_xon);
+#endif
+
+ netif_start_queue (dev);
+
+ return 0;
+}
+
+
+/* dev->tx_timeout handler: log the chip state, on media-sense chips
+ optionally step to another media type, then restart the Rx/Tx
+ engines and re-kick the transmitter. Runs under tp->lock. */
+static void tulip_tx_timeout(struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave (&tp->lock, flags);
+
+ if (tulip_media_cap[dev->if_port] & MediaIsMII) {
+ /* Do nothing -- the media monitor should handle this. */
+ if (tulip_debug > 1)
+ printk(KERN_WARNING "%s: Transmit timeout using MII device.\n",
+ dev->name);
+ } else if (tp->chip_id == DC21040) {
+ /* Toggle between 10baseT (0) and AUI (2) if link beat absent. */
+ if ( !tp->medialock && inl(ioaddr + CSR12) & 0x0002) {
+ dev->if_port = (dev->if_port == 2 ? 0 : 2);
+ printk(KERN_INFO "%s: 21040 transmit timed out, switching to "
+ "%s.\n",
+ dev->name, medianame[dev->if_port]);
+ tulip_select_media(dev, 0);
+ }
+ goto out;
+ } else if (tp->chip_id == DC21041) {
+ int csr12 = inl(ioaddr + CSR12);
+
+ printk(KERN_WARNING "%s: 21041 transmit timed out, status %8.8x, "
+ "CSR12 %8.8x, CSR13 %8.8x, CSR14 %8.8x, resetting...\n",
+ dev->name, inl(ioaddr + CSR5), csr12,
+ inl(ioaddr + CSR13), inl(ioaddr + CSR14));
+ tp->mediasense = 1;
+ if ( ! tp->medialock) {
+ if (dev->if_port == 1 || dev->if_port == 2)
+ if (csr12 & 0x0004) {
+ dev->if_port = 2 - dev->if_port;
+ } else
+ dev->if_port = 0;
+ else
+ dev->if_port = 1;
+ tulip_select_media(dev, 0);
+ }
+ } else if (tp->chip_id == DC21140 || tp->chip_id == DC21142
+ || tp->chip_id == MX98713 || tp->chip_id == COMPEX9881
+ || tp->chip_id == DM910X) {
+ printk(KERN_WARNING "%s: 21140 transmit timed out, status %8.8x, "
+ "SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n",
+ dev->name, inl(ioaddr + CSR5), inl(ioaddr + CSR12),
+ inl(ioaddr + CSR13), inl(ioaddr + CSR14), inl(ioaddr + CSR15));
+ /* Walk backwards through the media table past full-duplex
+ entries and retry with the next candidate. */
+ if ( ! tp->medialock && tp->mtable) {
+ do
+ --tp->cur_index;
+ while (tp->cur_index >= 0
+ && (tulip_media_cap[tp->mtable->mleaf[tp->cur_index].media]
+ & MediaIsFD));
+ if (--tp->cur_index < 0) {
+ /* We start again, but should instead look for default. */
+ tp->cur_index = tp->mtable->leafcount - 1;
+ }
+ tulip_select_media(dev, 0);
+ printk(KERN_WARNING "%s: transmit timed out, switching to %s "
+ "media.\n", dev->name, medianame[dev->if_port]);
+ }
+ } else if (tp->chip_id == PNIC2) {
+ printk(KERN_WARNING "%s: PNIC2 transmit timed out, status %8.8x, "
+ "CSR6/7 %8.8x / %8.8x CSR12 %8.8x, resetting...\n",
+ dev->name, (int)inl(ioaddr + CSR5), (int)inl(ioaddr + CSR6),
+ (int)inl(ioaddr + CSR7), (int)inl(ioaddr + CSR12));
+ } else {
+ printk(KERN_WARNING "%s: Transmit timed out, status %8.8x, CSR12 "
+ "%8.8x, resetting...\n",
+ dev->name, inl(ioaddr + CSR5), inl(ioaddr + CSR12));
+ dev->if_port = 0;
+ }
+
+#if defined(way_too_many_messages)
+ if (tulip_debug > 3) {
+ int i;
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
+ int j;
+ printk(KERN_DEBUG "%2d: %8.8x %8.8x %8.8x %8.8x "
+ "%2.2x %2.2x %2.2x.\n",
+ i, (unsigned int)tp->rx_ring[i].status,
+ (unsigned int)tp->rx_ring[i].length,
+ (unsigned int)tp->rx_ring[i].buffer1,
+ (unsigned int)tp->rx_ring[i].buffer2,
+ buf[0], buf[1], buf[2]);
+ for (j = 0; buf[j] != 0xee && j < 1600; j++)
+ if (j < 100) printk(" %2.2x", buf[j]);
+ printk(" j=%d.\n", j);
+ }
+ printk(KERN_DEBUG " Rx ring %8.8x: ", (int)tp->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ printk(" %8.8x", (unsigned int)tp->rx_ring[i].status);
+ printk("\n" KERN_DEBUG " Tx ring %8.8x: ", (int)tp->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" %8.8x", (unsigned int)tp->tx_ring[i].status);
+ printk("\n");
+ }
+#endif
+
+ /* Stop and restart the chip's Tx processes . */
+#ifdef CONFIG_NET_HW_FLOWCONTROL
+ if (tp->fc_bit && test_bit(tp->fc_bit,&netdev_fc_xoff))
+ printk("BUG tx_timeout restarting rx when fc on\n");
+#endif
+ tulip_restart_rxtx(tp);
+ /* Trigger an immediate transmit demand. */
+ outl(0, ioaddr + CSR1);
+
+ tp->stats.tx_errors++;
+
+out:
+ spin_unlock_irqrestore (&tp->lock, flags);
+ dev->trans_start = jiffies;
+ netif_wake_queue (dev);
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+/* Rx descriptors are chained via buffer2 with the last one wrapping to
+ the start; Rx buffers are allocated and DMA-mapped here, Tx
+ descriptors are left empty for tulip_start_xmit to fill. */
+static void tulip_init_ring(struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ int i;
+
+ tp->susp_rx = 0;
+ tp->ttimer = 0;
+ tp->nir = 0;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ tp->rx_ring[i].status = 0x00000000;
+ tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
+ tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1));
+ tp->rx_buffers[i].skb = NULL;
+ tp->rx_buffers[i].mapping = 0;
+ }
+ /* Mark the last entry as wrapping the ring. */
+ tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
+ tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ dma_addr_t mapping;
+
+ /* Note the receive buffer must be longword aligned.
+ dev_alloc_skb() provides 16 byte alignment. But do *not*
+ use skb_reserve() to align the IP header! */
+ struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
+ tp->rx_buffers[i].skb = skb;
+ if (skb == NULL)
+ break;
+ mapping = pci_map_single(tp->pdev, skb->tail,
+ PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ tp->rx_buffers[i].mapping = mapping;
+ skb->dev = dev; /* Mark as being used by this device. */
+ tp->rx_ring[i].status = cpu_to_le32(DescOwned); /* Owned by Tulip chip */
+ tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
+ }
+ tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE); /* 0 when fully populated; records the shortfall (as a huge unsigned) if an skb alloc failed */
+
+ /* The Tx buffer descriptor is filled in as needed, but we
+ do need to clear the ownership bit. */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ tp->tx_buffers[i].skb = NULL;
+ tp->tx_buffers[i].mapping = 0;
+ tp->tx_ring[i].status = 0x00000000;
+ tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1));
+ }
+ tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
+}
+
+/* dev->hard_start_xmit: DMA-map the skb, place it in the next Tx
+ descriptor (interrupt-on-completion flags chosen by ring occupancy),
+ hand the descriptor to the chip and poke CSR1 to start transmission.
+ Scatter-gather skbs are not supported (BUG on nr_frags != 0). */
+static int
+tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ int entry;
+ u32 flag;
+ dma_addr_t mapping;
+ unsigned long eflags;
+
+ if (skb_shinfo(skb)->nr_frags != 0)
+ BUG();
+
+ spin_lock_irqsave(&tp->lock, eflags);
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = tp->cur_tx % TX_RING_SIZE;
+
+ tp->tx_buffers[entry].skb = skb;
+ mapping = pci_map_single(tp->pdev, skb->data,
+ skb->len, PCI_DMA_TODEVICE);
+ tp->tx_buffers[entry].mapping = mapping;
+ tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
+
+ if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
+ flag = 0x60000000; /* No interrupt */
+ } else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
+ flag = 0xe0000000; /* Tx-done intr. */
+ } else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
+ flag = 0x60000000; /* No Tx-done intr. */
+ } else { /* Leave room for set_rx_mode() to fill entries. */
+ flag = 0xe0000000; /* Tx-done intr. */
+ netif_stop_queue(dev);
+ }
+ if (entry == TX_RING_SIZE-1)
+ flag = 0xe0000000 | DESC_RING_WRAP;
+
+ tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
+ /* if we were using Transmit Automatic Polling, we would need a
+ * wmb() here. */
+ tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
+ wmb();
+
+ tp->cur_tx++;
+
+ /* Trigger an immediate transmit demand. */
+ outl(0, dev->base_addr + CSR1);
+
+ spin_unlock_irqrestore(&tp->lock, eflags);
+
+ dev->trans_start = jiffies;
+
+ return 0;
+}
+
+/* Reclaim every outstanding Tx descriptor after the Tx engine has been
+ stopped: descriptors the chip still owned (status sign bit set) are
+ counted as tx_errors; setup frames (skb == NULL) are only unmapped;
+ normal packets are unmapped and their skbs freed. */
+static void tulip_clean_tx_ring(struct tulip_private *tp)
+{
+ unsigned int dirty_tx;
+
+ for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0;
+ dirty_tx++) {
+ int entry = dirty_tx % TX_RING_SIZE;
+ int status = le32_to_cpu(tp->tx_ring[entry].status);
+
+ if (status < 0) {
+ tp->stats.tx_errors++; /* It wasn't Txed */
+ tp->tx_ring[entry].status = 0;
+ }
+
+ /* Check for Tx filter setup frames. */
+ if (tp->tx_buffers[entry].skb == NULL) {
+ /* test because dummy frames not mapped */
+ if (tp->tx_buffers[entry].mapping)
+ pci_unmap_single(tp->pdev,
+ tp->tx_buffers[entry].mapping,
+ sizeof(tp->setup_frame),
+ PCI_DMA_TODEVICE);
+ continue;
+ }
+
+ pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
+ tp->tx_buffers[entry].skb->len,
+ PCI_DMA_TODEVICE);
+
+ /* Free the original skb. */
+ dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
+ tp->tx_buffers[entry].skb = NULL;
+ tp->tx_buffers[entry].mapping = 0;
+ }
+}
+
+/* Quiesce the device: stop the media timer, mask all interrupts, halt
+ the DMA engines, reclaim ring buffers, accumulate the missed-frame
+ counter and leave the chip in snooze (not sleep) power state. */
+static void tulip_down (struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct tulip_private *tp = (struct tulip_private *) dev->priv;
+ unsigned long flags;
+
+ del_timer_sync (&tp->timer);
+
+ spin_lock_irqsave (&tp->lock, flags);
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ outl (0x00000000, ioaddr + CSR7);
+
+ /* Stop the Tx and Rx processes. */
+ tulip_stop_rxtx(tp);
+
+ /* prepare receive buffers */
+ tulip_refill_rx(dev);
+
+ /* release any unconsumed transmit buffers */
+ tulip_clean_tx_ring(tp);
+
+ /* 21040 -- Leave the card in 10baseT state. */
+ if (tp->chip_id == DC21040)
+ outl (0x00000004, ioaddr + CSR13);
+
+ /* All-ones read suggests the chip is gone/unreadable; skip stats. */
+ if (inl (ioaddr + CSR6) != 0xffffffff)
+ tp->stats.rx_missed_errors += inl (ioaddr + CSR8) & 0xffff;
+
+ spin_unlock_irqrestore (&tp->lock, flags);
+
+ /* Re-arm the per-chip media monitor for the next tulip_up(). */
+ init_timer(&tp->timer);
+ tp->timer.data = (unsigned long)dev;
+ tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
+
+ dev->if_port = tp->saved_if_port;
+
+ /* Leave the driver in snooze, not sleep, mode. */
+ tulip_set_power_state (tp, 0, 1);
+}
+
+
+/* dev->stop handler: stop the queue and hardware, release the IRQ,
+ unmap and free every Rx/Tx buffer, then drop the module use count. */
+static int tulip_close (struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct tulip_private *tp = (struct tulip_private *) dev->priv;
+ int i;
+
+ netif_stop_queue (dev);
+
+#ifdef CONFIG_NET_HW_FLOWCONTROL
+ if (tp->fc_bit) {
+ int bit = tp->fc_bit;
+ tp->fc_bit = 0;
+ netdev_unregister_fc(bit);
+ }
+#endif
+ tulip_down (dev);
+
+ if (tulip_debug > 1)
+ printk (KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, inl (ioaddr + CSR5));
+
+ free_irq (dev->irq, dev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = tp->rx_buffers[i].skb;
+ dma_addr_t mapping = tp->rx_buffers[i].mapping;
+
+ tp->rx_buffers[i].skb = NULL;
+ tp->rx_buffers[i].mapping = 0;
+
+ tp->rx_ring[i].status = 0; /* Not owned by Tulip chip. */
+ tp->rx_ring[i].length = 0;
+ tp->rx_ring[i].buffer1 = 0xBADF00D0; /* An invalid address. */
+ if (skb) {
+ pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb (skb);
+ }
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ struct sk_buff *skb = tp->tx_buffers[i].skb;
+
+ if (skb != NULL) {
+ pci_unmap_single(tp->pdev, tp->tx_buffers[i].mapping,
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb (skb);
+ }
+ tp->tx_buffers[i].skb = NULL;
+ tp->tx_buffers[i].mapping = 0;
+ }
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+/* dev->get_stats: if the interface is running, fold the chip's missed-
+ frame counter (low 16 bits of CSR8) into the software stats under
+ tp->lock, then return the accumulated statistics. */
+static struct net_device_stats *tulip_get_stats(struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (netif_running(dev)) {
+ unsigned long flags;
+
+ spin_lock_irqsave (&tp->lock, flags);
+
+ tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
+
+ spin_unlock_irqrestore(&tp->lock, flags);
+ }
+
+ return &tp->stats;
+}
+
+
+/* SIOCETHTOOL sub-dispatcher: currently only ETHTOOL_GDRVINFO is
+ supported, reporting driver name/version and the PCI slot. Returns
+ -EFAULT on user-copy failure, -EOPNOTSUPP for unknown commands.
+ NOTE: the copy_from_user address had been corrupted to "ðcmd" by an
+ HTML-entity mangling of "&ethcmd"; restored here. */
+static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
+{
+ struct tulip_private *np = dev->priv;
+ u32 ethcmd;
+
+ /* First u32 of the user buffer selects the ethtool sub-command. */
+ if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
+ return -EFAULT;
+
+ switch (ethcmd) {
+ case ETHTOOL_GDRVINFO: {
+ struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
+ strcpy(info.driver, DRV_NAME);
+ strcpy(info.version, DRV_VERSION);
+ strcpy(info.bus_info, np->pdev->slot_name);
+ if (copy_to_user(useraddr, &info, sizeof(info)))
+ return -EFAULT;
+ return 0;
+ }
+
+ }
+
+ return -EOPNOTSUPP;
+}
+
+/* Provide ioctl() calls to examine the MII xcvr state. */
+/* Handles SIOCETHTOOL plus the MII get-phy/read-reg/write-reg ioctls.
+ phy_id 32 addresses the chip's internal NWay logic (HAS_NWAY), for
+ which MII registers 0/1/4/5 are synthesized from CSR12/CSR14 state
+ rather than read from a real transceiver. */
+static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct tulip_private *tp = dev->priv;
+ long ioaddr = dev->base_addr;
+ struct mii_ioctl_data *data = (struct mii_ioctl_data *) & rq->ifr_data;
+ const unsigned int phy_idx = 0;
+ int phy = tp->phys[phy_idx] & 0x1f;
+ unsigned int regnum = data->reg_num;
+
+ switch (cmd) {
+ case SIOCETHTOOL:
+ return netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
+
+ case SIOCGMIIPHY: /* Get address of MII PHY in use. */
+ case SIOCDEVPRIVATE: /* for binary compat, remove in 2.5 */
+ if (tp->mii_cnt)
+ data->phy_id = phy;
+ else if (tp->flags & HAS_NWAY)
+ data->phy_id = 32;
+ else if (tp->chip_id == COMET)
+ data->phy_id = 1;
+ else
+ return -ENODEV;
+
+ /* note: no break -- falls through to also read the register */
+ case SIOCGMIIREG: /* Read MII PHY register. */
+ case SIOCDEVPRIVATE+1: /* for binary compat, remove in 2.5 */
+ if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
+ int csr12 = inl (ioaddr + CSR12);
+ int csr14 = inl (ioaddr + CSR14);
+ switch (regnum) {
+ case 0:
+ if (((csr14<<5) & 0x1000) ||
+ (dev->if_port == 5 && tp->nwayset))
+ data->val_out = 0x1000;
+ else
+ data->val_out = (tulip_media_cap[dev->if_port]&MediaIs100 ? 0x2000 : 0)
+ | (tulip_media_cap[dev->if_port]&MediaIsFD ? 0x0100 : 0);
+ break;
+ case 1:
+ data->val_out =
+ 0x1848 +
+ ((csr12&0x7000) == 0x5000 ? 0x20 : 0) +
+ ((csr12&0x06) == 6 ? 0 : 4);
+ if (tp->chip_id != DC21041)
+ data->val_out |= 0x6048;
+ break;
+ case 4:
+ /* Advertised value, bogus 10baseTx-FD value from CSR6. */
+ data->val_out =
+ ((inl(ioaddr + CSR6) >> 3) & 0x0040) +
+ ((csr14 >> 1) & 0x20) + 1;
+ if (tp->chip_id != DC21041)
+ data->val_out |= ((csr14 >> 9) & 0x03C0);
+ break;
+ case 5: data->val_out = tp->lpar; break;
+ default: data->val_out = 0; break;
+ }
+ } else {
+ data->val_out = tulip_mdio_read (dev, data->phy_id & 0x1f, regnum);
+ }
+ return 0;
+
+ case SIOCSMIIREG: /* Write MII PHY register. */
+ case SIOCDEVPRIVATE+2: /* for binary compat, remove in 2.5 */
+ if (!capable (CAP_NET_ADMIN))
+ return -EPERM;
+ if (regnum & ~0x1f)
+ return -EINVAL;
+ if (data->phy_id == phy) {
+ u16 value = data->val_in;
+ switch (regnum) {
+ case 0: /* Check for autonegotiation on or reset. */
+ tp->full_duplex_lock = (value & 0x9000) ? 0 : 1;
+ if (tp->full_duplex_lock)
+ tp->full_duplex = (value & 0x0100) ? 1 : 0;
+ break;
+ case 4:
+ tp->advertising[phy_idx] =
+ tp->mii_advertise = data->val_in;
+ break;
+ }
+ }
+ if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
+ u16 value = data->val_in;
+ if (regnum == 0) {
+ if ((value & 0x1200) == 0x1200) {
+ if (tp->chip_id == PNIC2) {
+ pnic2_start_nway (dev);
+ } else {
+ t21142_start_nway (dev);
+ }
+ }
+ } else if (regnum == 4)
+ tp->sym_advertise = value;
+ } else {
+ tulip_mdio_write (dev, data->phy_id & 0x1f, regnum, data->val_in);
+ }
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+
+/* Set or clear the multicast filter for this adaptor.
+ Note that we only use exclusion around actually queueing the
+ new frame, not around filling tp->setup_frame. This is non-deterministic
+ when re-entered but still correct. */
+
+#undef set_bit_le
+#define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)
+
+/* Build a setup frame that loads the chip's 512-bit imperfect (hash)
+ * multicast filter: hash every address in dev->mc_list (plus the
+ * broadcast entry) into hash_table[], copy the table into the setup
+ * frame (each 16-bit word stored twice — only the low shortword of each
+ * setup-frame longword is used), then append our station address.
+ *
+ * BUGFIX: the table-copy loop and the setup_frm reset were nested
+ * inside the mc_list enumeration loop and reused the counter 'i', so
+ * the enumeration stopped after one address (i jumped to 32) and the
+ * frame was rewritten per entry. The copy now happens once, after all
+ * addresses have been hashed, matching the upstream tulip driver. */
+static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
+{
+	struct tulip_private *tp = (struct tulip_private *)dev->priv;
+	u16 hash_table[32];
+	struct dev_mc_list *mclist;
+	int i;
+	u16 *eaddrs;
+
+	memset(hash_table, 0, sizeof(hash_table));
+	set_bit_le(255, hash_table); 			/* Broadcast entry */
+	/* This should work on big-endian machines as well. */
+	for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+	     i++, mclist = mclist->next) {
+		int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
+
+		set_bit_le(index, hash_table);
+	}
+	for (i = 0; i < 32; i++) {
+		*setup_frm++ = hash_table[i];
+		*setup_frm++ = hash_table[i];
+	}
+	setup_frm = &tp->setup_frame[13*6];
+
+	/* Fill the final entry with our physical address. */
+	eaddrs = (u16 *)dev->dev_addr;
+	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
+	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
+	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
+}
+
+/* Build a setup frame using the chip's 16-entry perfect filter: one
+ * entry per multicast address (the caller guarantees mc_count <= 14),
+ * broadcast-address (0xff) padding for the unused slots, and our own
+ * station address in the final entry. Each 16-bit word is written
+ * twice because only the low-address shortword of every setup-frame
+ * longword is valid (see the note in set_rx_mode). The interleaved
+ * "*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;" sequence is
+ * deliberate and order-critical: copy, duplicate, then advance. */
+static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ struct dev_mc_list *mclist;
+ int i;
+ u16 *eaddrs;
+
+ /* We have <= 14 addresses so we can use the wonderful
+ 16 address perfect filtering of the Tulip. */
+ for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ eaddrs = (u16 *)mclist->dmi_addr;
+ *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
+ *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
+ *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
+ }
+ /* Fill the unused entries with the broadcast address. */
+ memset(setup_frm, 0xff, (15-i)*12);
+ setup_frm = &tp->setup_frame[15*6];
+
+ /* Fill the final entry with our physical address. */
+ eaddrs = (u16 *)dev->dev_addr;
+ *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
+ *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
+ *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
+}
+
+
+/* Reprogram the chip's receive filter from dev->flags and dev->mc_list.
+ * Strategy, in order: promiscuous or all-multicast via CSR6 bits; for
+ * MC_HASH_ONLY work-alikes, a 64-bit hash written to chip-specific
+ * registers; otherwise a setup frame queued on the Tx ring (hash form
+ * when more than 14 addresses, perfect filter otherwise). Called from
+ * dev->set_multicast_list, so it may run concurrently with the
+ * transmit path — only the Tx-ring queueing is done under tp->lock
+ * (see the comment above build_setup_frame_hash). */
+static void set_rx_mode(struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int csr6;
+
+ /* Clear the current filter-mode bits in both the shadow copy and
+ the live register value before selecting the new mode. */
+ csr6 = inl(ioaddr + CSR6) & ~0x00D5;
+
+ tp->csr6 &= ~0x00D5;
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
+ csr6 |= AcceptAllMulticast | AcceptAllPhys;
+ /* Unconditionally log net taps. */
+ printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
+ } else if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter well -- accept all multicasts. */
+ tp->csr6 |= AcceptAllMulticast;
+ csr6 |= AcceptAllMulticast;
+ } else if (tp->flags & MC_HASH_ONLY) {
+ /* Some work-alikes have only a 64-entry hash filter table. */
+ /* Should verify correctness on big-endian/__powerpc__ */
+ struct dev_mc_list *mclist;
+ int i;
+ if (dev->mc_count > 64) { /* Arbitrary non-effective limit. */
+ tp->csr6 |= AcceptAllMulticast;
+ csr6 |= AcceptAllMulticast;
+ } else {
+ u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */
+ int filterbit;
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ /* COMET-style MACs hash with little-endian CRC, the
+ rest use the top 6 bits of the big-endian CRC. */
+ if (tp->flags & COMET_MAC_ADDR)
+ filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
+ else
+ filterbit = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ filterbit &= 0x3f;
+ mc_filter[filterbit >> 5] |= cpu_to_le32(1 << (filterbit & 31));
+ if (tulip_debug > 2) {
+ printk(KERN_INFO "%s: Added filter for %2.2x:%2.2x:%2.2x:"
+ "%2.2x:%2.2x:%2.2x %8.8x bit %d.\n", dev->name,
+ mclist->dmi_addr[0], mclist->dmi_addr[1],
+ mclist->dmi_addr[2], mclist->dmi_addr[3],
+ mclist->dmi_addr[4], mclist->dmi_addr[5],
+ ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit);
+ }
+ }
+ /* Only touch the hardware when the filter actually changed. */
+ if (mc_filter[0] == tp->mc_filter[0] &&
+ mc_filter[1] == tp->mc_filter[1])
+ ; /* No change. */
+ else if (tp->flags & IS_ASIX) {
+ outl(2, ioaddr + CSR13);
+ outl(mc_filter[0], ioaddr + CSR14);
+ outl(3, ioaddr + CSR13);
+ outl(mc_filter[1], ioaddr + CSR14);
+ } else if (tp->flags & COMET_MAC_ADDR) {
+ outl(mc_filter[0], ioaddr + 0xAC);
+ outl(mc_filter[1], ioaddr + 0xB0);
+ }
+ tp->mc_filter[0] = mc_filter[0];
+ tp->mc_filter[1] = mc_filter[1];
+ }
+ } else {
+ unsigned long flags;
+
+ /* Note that only the low-address shortword of setup_frame is valid!
+ The values are doubled for big-endian architectures. */
+ if (dev->mc_count > 14) { /* Must use a multicast hash table. */
+ build_setup_frame_hash(tp->setup_frame, dev);
+ } else {
+ build_setup_frame_perfect(tp->setup_frame, dev);
+ }
+
+ spin_lock_irqsave(&tp->lock, flags);
+
+ if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
+ /* Same setup recently queued, we need not add it. */
+ } else {
+ u32 tx_flags = 0x08000000 | 192;
+ unsigned int entry;
+ int dummy = -1;
+
+ /* Now add this frame to the Tx list. */
+
+ entry = tp->cur_tx++ % TX_RING_SIZE;
+
+ if (entry != 0) {
+ /* Avoid a chip errata by prefixing a dummy entry. */
+ tp->tx_buffers[entry].skb = NULL;
+ tp->tx_buffers[entry].mapping = 0;
+ tp->tx_ring[entry].length =
+ (entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
+ tp->tx_ring[entry].buffer1 = 0;
+ /* Must set DescOwned later to avoid race with chip */
+ dummy = entry;
+ entry = tp->cur_tx++ % TX_RING_SIZE;
+ }
+
+ tp->tx_buffers[entry].skb = NULL;
+ tp->tx_buffers[entry].mapping =
+ pci_map_single(tp->pdev, tp->setup_frame,
+ sizeof(tp->setup_frame),
+ PCI_DMA_TODEVICE);
+ /* Put the setup frame on the Tx list. */
+ if (entry == TX_RING_SIZE-1)
+ tx_flags |= DESC_RING_WRAP; /* Wrap ring. */
+ tp->tx_ring[entry].length = cpu_to_le32(tx_flags);
+ tp->tx_ring[entry].buffer1 =
+ cpu_to_le32(tp->tx_buffers[entry].mapping);
+ /* Hand the setup descriptor (then the dummy, if any) to the
+ chip only after both are fully written. */
+ tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
+ if (dummy >= 0)
+ tp->tx_ring[dummy].status = cpu_to_le32(DescOwned);
+ if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2)
+ netif_stop_queue(dev);
+
+ /* Trigger an immediate transmit demand. */
+ outl(0, ioaddr + CSR1);
+ }
+
+ spin_unlock_irqrestore(&tp->lock, flags);
+ }
+
+ outl(csr6, ioaddr + CSR6);
+}
+
+#ifdef CONFIG_TULIP_MWI
+/* Compute a CSR0 (bus mode) value enabling Memory-Read-Multiple and,
+ * when the hardware and PCI configuration allow it, Memory-Write-and-
+ * Invalidate, with cache-alignment and burst-length fields derived
+ * from the PCI cache line size. Falls back to conservative de4x5-style
+ * defaults when no usable cache line size is reported. Stores the
+ * result in tp->csr0. */
+static void __devinit tulip_mwi_config (struct pci_dev *pdev,
+ struct net_device *dev)
+{
+ struct tulip_private *tp = dev->priv;
+ u8 cache;
+ u16 pci_command;
+ u32 csr0;
+
+ if (tulip_debug > 3)
+ printk(KERN_DEBUG "%s: tulip_mwi_config()\n", pdev->slot_name);
+
+ tp->csr0 = csr0 = 0;
+
+ /* if we have any cache line size at all, we can do MRM */
+ csr0 |= MRM;
+
+ /* ...and barring hardware bugs, MWI */
+ if (!(tp->chip_id == DC21143 && tp->revision == 65))
+ csr0 |= MWI;
+
+ /* set or disable MWI in the standard PCI command bit.
+ * Check for the case where mwi is desired but not available
+ */
+ if (csr0 & MWI) pci_set_mwi(pdev);
+ else pci_clear_mwi(pdev);
+
+ /* read result from hardware (in case bit refused to enable) */
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
+ if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE)))
+ csr0 &= ~MWI;
+
+ /* if cache line size hardwired to zero, no MWI */
+ pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
+ if ((csr0 & MWI) && (cache == 0)) {
+ csr0 &= ~MWI;
+ pci_clear_mwi(pdev);
+ }
+
+ /* assign per-cacheline-size cache alignment and
+ * burst length values
+ */
+ switch (cache) {
+ case 8:
+ csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift);
+ break;
+ case 16:
+ csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift);
+ break;
+ case 32:
+ csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift);
+ break;
+ default:
+ /* unrecognized cache line size: flag it (cache = 0) so the
+ fallback path below picks conservative defaults */
+ cache = 0;
+ break;
+ }
+
+ /* if we have a good cache line size, we by now have a good
+ * csr0, so save it and exit
+ */
+ if (cache)
+ goto out;
+
+ /* we don't have a good csr0 or cache line size, disable MWI */
+ if (csr0 & MWI) {
+ pci_clear_mwi(pdev);
+ csr0 &= ~MWI;
+ }
+
+ /* sane defaults for burst length and cache alignment
+ * originally from de4x5 driver
+ */
+ csr0 |= (8 << BurstLenShift) | (1 << CALShift);
+
+out:
+ tp->csr0 = csr0;
+ if (tulip_debug > 2)
+ printk(KERN_DEBUG "%s: MWI config cacheline=%d, csr0=%08x\n",
+ pdev->slot_name, cache, csr0);
+}
+#endif
+
+/* Probe and attach one Tulip board: apply per-chipset CSR0 bus-mode
+ * workarounds, map the register window, allocate the DMA descriptor
+ * rings, read the station address (serial ROM, PNIC register, COMET
+ * register or SROM depending on the chip), install the net_device
+ * callbacks, reset the transceiver, and put the chip into snooze mode
+ * until opened. Returns 0 on success or a negative errno.
+ * NOTE(review): 'csr0' below is a file-scope variable defined earlier
+ * in this file (outside this hunk); the workarounds modify it for all
+ * subsequently probed boards — confirm against the full file. */
+static int __devinit tulip_init_one (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct tulip_private *tp;
+ /* See note below on the multiport cards. */
+ static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'};
+ static int last_irq;
+ static int multiport_cnt; /* For four-port boards w/one EEPROM */
+ u8 chip_rev;
+ int i, irq;
+ unsigned short sum;
+ u8 ee_data[EEPROM_SIZE];
+ struct net_device *dev;
+ long ioaddr;
+ static int board_idx = -1;
+ int chip_idx = ent->driver_data;
+ unsigned int t2104x_mode = 0;
+ unsigned int eeprom_missing = 0;
+ unsigned int force_csr0 = 0;
+
+#ifndef MODULE
+ static int did_version; /* Already printed version info. */
+ if (tulip_debug > 0 && did_version++ == 0)
+ printk (KERN_INFO "%s", version);
+#endif
+
+ board_idx++;
+
+ /*
+ * Lan media wire a tulip chip to a wan interface. Needs a very
+ * different driver (lmc driver)
+ */
+
+ if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
+ printk (KERN_ERR PFX "skipping LMC card.\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Early DM9100's need software CRC and the DMFE driver
+ */
+
+ if (pdev->vendor == 0x1282 && pdev->device == 0x9100)
+ {
+ u32 dev_rev;
+ /* Read Chip revision */
+ pci_read_config_dword(pdev, PCI_REVISION_ID, &dev_rev);
+ if(dev_rev < 0x02000030)
+ {
+ printk(KERN_ERR PFX "skipping early DM9100 with Crc bug (use dmfe)\n");
+ return -ENODEV;
+ }
+ }
+
+ /*
+ * Looks for early PCI chipsets where people report hangs
+ * without the workarounds being on.
+ */
+
+ /* Intel Saturn. Switch to 8 long words burst, 8 long word cache aligned
+ Aries might need this too. The Saturn errata are not pretty reading but
+ thankfully its an old 486 chipset.
+ */
+
+ if (pci_find_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424, NULL)) {
+ csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift);
+ force_csr0 = 1;
+ }
+ /* The dreaded SiS496 486 chipset. Same workaround as above. */
+ if (pci_find_device(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496, NULL)) {
+ csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift);
+ force_csr0 = 1;
+ }
+
+ /* bugfix: the ASIX must have a burst limit or horrible things happen. */
+ if (chip_idx == AX88140) {
+ if ((csr0 & 0x3f00) == 0)
+ csr0 |= 0x2000;
+ }
+
+ /* PNIC doesn't have MWI/MRL/MRM... */
+ if (chip_idx == LC82C168)
+ csr0 &= ~0xfff10000; /* zero reserved bits 31:20, 16 */
+
+ /* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */
+ if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
+ csr0 &= ~0x01f100ff;
+
+#if defined(__sparc__)
+ /* DM9102A needs 32-dword alignment/burst length on sparc - chip bug? */
+ if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
+ csr0 = (csr0 & ~0xff00) | 0xe000;
+#endif
+
+ /*
+ * And back to business
+ */
+
+ i = pci_enable_device(pdev);
+ if (i) {
+ printk (KERN_ERR PFX
+ "Cannot enable tulip board #%d, aborting\n",
+ board_idx);
+ return i;
+ }
+
+ ioaddr = pci_resource_start (pdev, 0);
+ irq = pdev->irq;
+
+ /* alloc_etherdev ensures aligned and zeroed private structures */
+ dev = alloc_etherdev (sizeof (*tp));
+ if (!dev) {
+ printk (KERN_ERR PFX "ether device alloc failed, aborting\n");
+ return -ENOMEM;
+ }
+
+ if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
+ printk (KERN_ERR PFX "%s: I/O region (0x%lx@0x%lx) too small, "
+ "aborting\n", pdev->slot_name,
+ pci_resource_len (pdev, 0),
+ pci_resource_start (pdev, 0));
+ goto err_out_free_netdev;
+ }
+
+ /* grab all resources from both PIO and MMIO regions, as we
+ * don't want anyone else messing around with our hardware */
+ if (pci_request_regions (pdev, "tulip"))
+ goto err_out_free_netdev;
+
+#ifndef USE_IO_OPS
+ /* MMIO build: replace the PIO base with a mapping of BAR 1 */
+ ioaddr = (unsigned long) ioremap (pci_resource_start (pdev, 1),
+ tulip_tbl[chip_idx].io_size);
+ if (!ioaddr)
+ goto err_out_free_res;
+#endif
+
+ pci_read_config_byte (pdev, PCI_REVISION_ID, &chip_rev);
+
+ /*
+ * initialize private data structure 'tp'
+ * it is zeroed and aligned in alloc_etherdev
+ */
+ tp = dev->priv;
+
+ /* One DMA-coherent allocation holds the Rx ring followed by the
+ Tx ring. */
+ tp->rx_ring = pci_alloc_consistent(pdev,
+ sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
+ sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
+ &tp->rx_ring_dma);
+ if (!tp->rx_ring)
+ goto err_out_mtable;
+ tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
+ tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;
+
+ tp->chip_id = chip_idx;
+ tp->flags = tulip_tbl[chip_idx].flags;
+ tp->pdev = pdev;
+ tp->base_addr = ioaddr;
+ tp->revision = chip_rev;
+ tp->csr0 = csr0;
+ spin_lock_init(&tp->lock);
+ spin_lock_init(&tp->mii_lock);
+ init_timer(&tp->timer);
+ tp->timer.data = (unsigned long)dev;
+ tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
+
+ dev->base_addr = ioaddr;
+
+#ifdef CONFIG_TULIP_MWI
+ if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
+ tulip_mwi_config (pdev, dev);
+#else
+ /* MWI is broken for DC21143 rev 65... */
+ if (chip_idx == DC21143 && chip_rev == 65)
+ tp->csr0 &= ~MWI;
+#endif
+
+ /* Stop the chip's Tx and Rx processes. */
+ tulip_stop_rxtx(tp);
+
+ pci_set_master(pdev);
+
+ /* Clear the missed-packet counter. */
+ inl(ioaddr + CSR8);
+
+ /* CSR9 bit 15 set: treat this 21041 as a 21040 ("21040 compatible
+ mode", reported in the probe printout below). */
+ if (chip_idx == DC21041) {
+ if (inl(ioaddr + CSR9) & 0x8000) {
+ chip_idx = DC21040;
+ t2104x_mode = 1;
+ } else {
+ t2104x_mode = 2;
+ }
+ }
+
+ /* The station address ROM is read byte serially. The register must
+ be polled, waiting for the value to be read bit serially from the
+ EEPROM.
+ */
+ sum = 0;
+ if (chip_idx == DC21040) {
+ outl(0, ioaddr + CSR9); /* Reset the pointer with a dummy write. */
+ for (i = 0; i < 6; i++) {
+ int value, boguscnt = 100000;
+ do
+ value = inl(ioaddr + CSR9);
+ while (value < 0 && --boguscnt > 0);
+ dev->dev_addr[i] = value;
+ sum += value & 0xff;
+ }
+ } else if (chip_idx == LC82C168) {
+ for (i = 0; i < 3; i++) {
+ int value, boguscnt = 100000;
+ outl(0x600 | i, ioaddr + 0x98);
+ do
+ value = inl(ioaddr + CSR9);
+ while (value < 0 && --boguscnt > 0);
+ put_unaligned(le16_to_cpu(value), ((u16*)dev->dev_addr) + i);
+ sum += value & 0xffff;
+ }
+ } else if (chip_idx == COMET) {
+ /* No need to read the EEPROM. */
+ put_unaligned(inl(ioaddr + 0xA4), (u32 *)dev->dev_addr);
+ put_unaligned(inl(ioaddr + 0xA8), (u16 *)(dev->dev_addr + 4));
+ for (i = 0; i < 6; i ++)
+ sum += dev->dev_addr[i];
+ } else {
+ /* A serial EEPROM interface, we read now and sort it out later. */
+ int sa_offset = 0;
+ int ee_addr_size = tulip_read_eeprom(ioaddr, 0xff, 8) & 0x40000 ? 8 : 6;
+
+ for (i = 0; i < sizeof(ee_data)/2; i++)
+ ((u16 *)ee_data)[i] =
+ le16_to_cpu(tulip_read_eeprom(ioaddr, i, ee_addr_size));
+
+ /* DEC now has a specification (see Notes) but early board makers
+ just put the address in the first EEPROM locations. */
+ /* This does memcmp(eedata, eedata+16, 8) */
+ for (i = 0; i < 8; i ++)
+ if (ee_data[i] != ee_data[16+i])
+ sa_offset = 20;
+ if (chip_idx == CONEXANT) {
+ /* Check that the tuple type and length is correct. */
+ if (ee_data[0x198] == 0x04 && ee_data[0x199] == 6)
+ sa_offset = 0x19A;
+ }
+ if (ee_data[0] == 0xff && ee_data[1] == 0xff &&
+ ee_data[2] == 0) {
+ sa_offset = 2; /* Grrr, damn Matrox boards. */
+ multiport_cnt = 4;
+ }
+#ifdef CONFIG_DDB5476
+ if ((pdev->bus->number == 0) && (PCI_SLOT(pdev->devfn) == 6)) {
+ /* DDB5476 MAC address in first EEPROM locations. */
+ sa_offset = 0;
+ /* No media table either */
+ tp->flags &= ~HAS_MEDIA_TABLE;
+ }
+#endif
+#ifdef CONFIG_DDB5477
+ if ((pdev->bus->number == 0) && (PCI_SLOT(pdev->devfn) == 4)) {
+ /* DDB5477 MAC address in first EEPROM locations. */
+ sa_offset = 0;
+ /* No media table either */
+ tp->flags &= ~HAS_MEDIA_TABLE;
+ }
+#endif
+#ifdef CONFIG_MIPS_COBALT
+ if ((pdev->bus->number == 0) &&
+ ((PCI_SLOT(pdev->devfn) == 7) ||
+ (PCI_SLOT(pdev->devfn) == 12))) {
+ /* Cobalt MAC address in first EEPROM locations. */
+ sa_offset = 0;
+ /* No media table either */
+ tp->flags &= ~HAS_MEDIA_TABLE;
+ }
+#endif
+#ifdef __hppa__
+ /* 3x5 HSC (J3514A) has a broken srom */
+ if(ee_data[0] == 0x61 && ee_data[1] == 0x10) {
+ /* pci_vendor_id and subsystem_id are swapped */
+ ee_data[0] = ee_data[2];
+ ee_data[1] = ee_data[3];
+ ee_data[2] = 0x61;
+ ee_data[3] = 0x10;
+
+ /* srom needs to be byte-swapped and shifted up 1 word.
+ * This shift needs to happen at the end of the MAC
+ * first because of the 2 byte overlap.
+ */
+ for(i = 4; i >= 0; i -= 2) {
+ ee_data[17 + i + 3] = ee_data[17 + i];
+ ee_data[16 + i + 5] = ee_data[16 + i];
+ }
+ }
+#endif
+ for (i = 0; i < 6; i ++) {
+ dev->dev_addr[i] = ee_data[i + sa_offset];
+ sum += ee_data[i + sa_offset];
+ }
+ }
+ /* Lite-On boards have the address byte-swapped. */
+ if ((dev->dev_addr[0] == 0xA0 || dev->dev_addr[0] == 0xC0)
+ && dev->dev_addr[1] == 0x00)
+ for (i = 0; i < 6; i+=2) {
+ char tmp = dev->dev_addr[i];
+ dev->dev_addr[i] = dev->dev_addr[i+1];
+ dev->dev_addr[i+1] = tmp;
+ }
+ /* On the Zynx 315 Etherarray and other multiport boards only the
+ first Tulip has an EEPROM.
+ On Sparc systems the mac address is held in the OBP property
+ "local-mac-address".
+ The addresses of the subsequent ports are derived from the first.
+ Many PCI BIOSes also incorrectly report the IRQ line, so we correct
+ that here as well. */
+ if (sum == 0 || sum == 6*0xff) {
+#if defined(__sparc__)
+ struct pcidev_cookie *pcp = pdev->sysdata;
+#endif
+ eeprom_missing = 1;
+ /* After the loop i == 5: derive the last byte from the
+ previous board's address. */
+ for (i = 0; i < 5; i++)
+ dev->dev_addr[i] = last_phys_addr[i];
+ dev->dev_addr[i] = last_phys_addr[i] + 1;
+#if defined(__sparc__)
+ if ((pcp != NULL) && prom_getproplen(pcp->prom_node,
+ "local-mac-address") == 6) {
+ prom_getproperty(pcp->prom_node, "local-mac-address",
+ dev->dev_addr, 6);
+ }
+#endif
+#if defined(__i386__) /* Patch up x86 BIOS bug. */
+ if (last_irq)
+ irq = last_irq;
+#endif
+ }
+
+ for (i = 0; i < 6; i++)
+ last_phys_addr[i] = dev->dev_addr[i];
+ last_irq = irq;
+ dev->irq = irq;
+
+ /* The lower four bits are the media type. */
+ if (board_idx >= 0 && board_idx < MAX_UNITS) {
+ if (options[board_idx] & MEDIA_MASK)
+ tp->default_port = options[board_idx] & MEDIA_MASK;
+ if ((options[board_idx] & FullDuplex) || full_duplex[board_idx] > 0)
+ tp->full_duplex = 1;
+ if (mtu[board_idx] > 0)
+ dev->mtu = mtu[board_idx];
+ }
+ if (dev->mem_start & MEDIA_MASK)
+ tp->default_port = dev->mem_start & MEDIA_MASK;
+ if (tp->default_port) {
+ printk(KERN_INFO "tulip%d: Transceiver selection forced to %s.\n",
+ board_idx, medianame[tp->default_port & MEDIA_MASK]);
+ tp->medialock = 1;
+ if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
+ tp->full_duplex = 1;
+ }
+ if (tp->full_duplex)
+ tp->full_duplex_lock = 1;
+
+ if (tulip_media_cap[tp->default_port] & MediaIsMII) {
+ u16 media2advert[] = { 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200 };
+ tp->mii_advertise = media2advert[tp->default_port - 9];
+ tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */
+ }
+
+ if (tp->flags & HAS_MEDIA_TABLE) {
+ memcpy(tp->eeprom, ee_data, sizeof(tp->eeprom));
+
+ sprintf(dev->name, "tulip%d", board_idx); /* hack */
+ tulip_parse_eeprom(dev);
+ strcpy(dev->name, "eth%d"); /* un-hack */
+ }
+
+ if ((tp->flags & ALWAYS_CHECK_MII) ||
+ (tp->mtable && tp->mtable->has_mii) ||
+ ( ! tp->mtable && (tp->flags & HAS_MII))) {
+ if (tp->mtable && tp->mtable->has_mii) {
+ for (i = 0; i < tp->mtable->leafcount; i++)
+ if (tp->mtable->mleaf[i].media == 11) {
+ tp->cur_index = i;
+ tp->saved_if_port = dev->if_port;
+ tulip_select_media(dev, 2);
+ dev->if_port = tp->saved_if_port;
+ break;
+ }
+ }
+
+ /* Find the connected MII xcvrs.
+ Doing this in open() would allow detecting external xcvrs
+ later, but takes much time. */
+ tulip_find_mii (dev, board_idx);
+ }
+
+ /* The Tulip-specific entries in the device structure. */
+ dev->open = tulip_open;
+ dev->hard_start_xmit = tulip_start_xmit;
+ dev->tx_timeout = tulip_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+ dev->stop = tulip_close;
+ dev->get_stats = tulip_get_stats;
+ dev->do_ioctl = private_ioctl;
+ dev->set_multicast_list = set_rx_mode;
+
+ if (register_netdev(dev))
+ goto err_out_free_ring;
+
+ printk(KERN_INFO "%s: %s rev %d at %#3lx,",
+ dev->name, tulip_tbl[chip_idx].chip_name, chip_rev, ioaddr);
+ pci_set_drvdata(pdev, dev);
+
+ if (t2104x_mode == 1)
+ printk(" 21040 compatible mode,");
+ else if (t2104x_mode == 2)
+ printk(" 21041 mode,");
+ if (eeprom_missing)
+ printk(" EEPROM not present,");
+ for (i = 0; i < 6; i++)
+ printk("%c%2.2X", i ? ':' : ' ', dev->dev_addr[i]);
+ printk(", IRQ %d.\n", irq);
+
+ if (tp->chip_id == PNIC2)
+ tp->link_change = pnic2_lnk_change;
+ else if ((tp->flags & HAS_NWAY) || tp->chip_id == DC21041)
+ tp->link_change = t21142_lnk_change;
+ else if (tp->flags & HAS_PNICNWAY)
+ tp->link_change = pnic_lnk_change;
+
+ /* Reset the xcvr interface and turn on heartbeat. */
+ switch (chip_idx) {
+ case DC21041:
+ if (tp->sym_advertise == 0)
+ tp->sym_advertise = 0x0061;
+ outl(0x00000000, ioaddr + CSR13);
+ outl(0xFFFFFFFF, ioaddr + CSR14);
+ outl(0x00000008, ioaddr + CSR15); /* Listen on AUI also. */
+ outl(inl(ioaddr + CSR6) | csr6_fd, ioaddr + CSR6);
+ outl(0x0000EF01, ioaddr + CSR13);
+ break;
+ case DC21040:
+ outl(0x00000000, ioaddr + CSR13);
+ outl(0x00000004, ioaddr + CSR13);
+ break;
+ case DC21140:
+ case DM910X:
+ default:
+ if (tp->mtable)
+ outl(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
+ break;
+ case DC21142:
+ if (tp->mii_cnt || tulip_media_cap[dev->if_port] & MediaIsMII) {
+ outl(csr6_mask_defstate, ioaddr + CSR6);
+ outl(0x0000, ioaddr + CSR13);
+ outl(0x0000, ioaddr + CSR14);
+ outl(csr6_mask_hdcap, ioaddr + CSR6);
+ } else
+ t21142_start_nway(dev);
+ break;
+ case PNIC2:
+ /* just do a reset for sanity sake */
+ outl(0x0000, ioaddr + CSR13);
+ outl(0x0000, ioaddr + CSR14);
+ break;
+ case LC82C168:
+ if ( ! tp->mii_cnt) {
+ tp->nway = 1;
+ tp->nwayset = 0;
+ outl(csr6_ttm | csr6_ca, ioaddr + CSR6);
+ outl(0x30, ioaddr + CSR12);
+ outl(0x0001F078, ioaddr + CSR6);
+ outl(0x0201F078, ioaddr + CSR6); /* Turn on autonegotiation. */
+ }
+ break;
+ case MX98713:
+ case COMPEX9881:
+ outl(0x00000000, ioaddr + CSR6);
+ outl(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */
+ outl(0x00000001, ioaddr + CSR13);
+ break;
+ case MX98715:
+ case MX98725:
+ outl(0x01a80000, ioaddr + CSR6);
+ outl(0xFFFFFFFF, ioaddr + CSR14);
+ outl(0x00001000, ioaddr + CSR12);
+ break;
+ case COMET:
+ /* No initialization necessary. */
+ break;
+ }
+
+ /* put the chip in snooze mode until opened */
+ tulip_set_power_state (tp, 0, 1);
+
+ alert_slow_netdevice(dev, tulip_tbl[chip_idx].chip_name);
+
+ return 0;
+
+ /* Error unwind: each label releases everything acquired before the
+ failure point, in reverse order of acquisition. */
+err_out_free_ring:
+ pci_free_consistent (pdev,
+ sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
+ sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
+ tp->rx_ring, tp->rx_ring_dma);
+
+err_out_mtable:
+ if (tp->mtable)
+ kfree (tp->mtable);
+#ifndef USE_IO_OPS
+ iounmap((void *)ioaddr);
+
+err_out_free_res:
+#endif
+ pci_release_regions (pdev);
+
+err_out_free_netdev:
+ kfree (dev);
+ return -ENODEV;
+}
+
+
+#ifdef CONFIG_PM
+
+/* Power-management suspend hook: if the interface is up and present,
+ * detach it from the stack and shut the chip down before the system
+ * sleeps. Always reports success. */
+static int tulip_suspend (struct pci_dev *pdev, u32 state)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+
+	if (!dev)
+		return 0;
+
+	if (netif_running (dev) && netif_device_present (dev)) {
+		netif_device_detach (dev);
+		tulip_down (dev);
+		/* pci_power_off(pdev, -1); */
+	}
+	return 0;
+}
+
+
+/* Power-management resume hook: the mirror of tulip_suspend. If the
+ * interface was running but is currently detached, re-enable the PCI
+ * device, bring the chip back up and re-attach the interface. */
+static int tulip_resume(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+
+	if (!dev || !netif_running (dev) || netif_device_present (dev))
+		return 0;
+
+#if 1
+	pci_enable_device (pdev);
+#endif
+	/* pci_power_on(pdev); */
+	tulip_up (dev);
+	netif_device_attach (dev);
+	return 0;
+}
+
+#endif /* CONFIG_PM */
+
+
+/* Detach and free one tulip board, undoing tulip_init_one.
+ * BUGFIX: the DMA descriptor rings were freed BEFORE unregister_netdev,
+ * leaving a window in which userspace could still open the (still
+ * registered) interface and touch freed ring memory. Unregister first,
+ * then release resources — the order later upstream kernels use. */
+static void __devexit tulip_remove_one (struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata (pdev);
+	struct tulip_private *tp;
+
+	if (!dev)
+		return;
+
+	tp = dev->priv;
+	/* Make the interface invisible to userspace before tearing down
+	 * the rings it would use. */
+	unregister_netdev (dev);
+	pci_free_consistent (pdev,
+			     sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
+			     sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
+			     tp->rx_ring, tp->rx_ring_dma);
+	if (tp->mtable)
+		kfree (tp->mtable);
+#ifndef USE_IO_OPS
+	iounmap((void *)dev->base_addr);
+#endif
+	kfree (dev);
+	pci_release_regions (pdev);
+	pci_set_drvdata (pdev, NULL);
+
+	/* pci_power_off (pdev, -1); */
+}
+
+
+/* PCI driver glue: binds the probe/remove (and, with CONFIG_PM, the
+ * suspend/resume) callbacks to the device IDs in tulip_pci_tbl.
+ * Uses the old GNU "label:" initializer style, consistent with the
+ * rest of this 2.4-era file. */
+static struct pci_driver tulip_driver = {
+ name: DRV_NAME,
+ id_table: tulip_pci_tbl,
+ probe: tulip_init_one,
+ remove: __devexit_p(tulip_remove_one),
+#ifdef CONFIG_PM
+ suspend: tulip_suspend,
+ resume: tulip_resume,
+#endif /* CONFIG_PM */
+};
+
+
+/* Module entry point: announce the driver (modular builds), mirror the
+ * module parameters into the globals shared with the fast paths, and
+ * register with the PCI core, which probes all matching boards.
+ * Returns pci_module_init()'s result. */
+static int __init tulip_init (void)
+{
+#ifdef MODULE
+ printk (KERN_INFO "%s", version);
+#endif
+
+ /* copy module parms into globals */
+ tulip_rx_copybreak = rx_copybreak;
+ tulip_max_interrupt_work = max_interrupt_work;
+
+ /* probe for and init boards */
+ return pci_module_init (&tulip_driver);
+}
+
+
+/* Module exit point: unregistering the PCI driver invokes
+ * tulip_remove_one for every bound board. */
+static void __exit tulip_cleanup (void)
+{
+ pci_unregister_driver (&tulip_driver);
+}
+
+
+module_init(tulip_init);
+module_exit(tulip_cleanup);
--- /dev/null
+/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
+/*
+ Written 1998-2001 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ This driver is designed for the VIA VT86C100A Rhine-I.
+ It also works with the 6102 Rhine-II, and 6105/6105M Rhine-III.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+
+ This driver contains some changes from the original Donald Becker
+ version. He may or may not be interested in bug reports on this
+ code. You can find his versions at:
+ http://www.scyld.com/network/via-rhine.html
+
+
+ Linux kernel version history:
+
+ LK1.1.0:
+ - Jeff Garzik: softnet 'n stuff
+
+ LK1.1.1:
+ - Justin Guyett: softnet and locking fixes
+ - Jeff Garzik: use PCI interface
+
+ LK1.1.2:
+ - Urban Widmark: minor cleanups, merges from Becker 1.03a/1.04 versions
+
+ LK1.1.3:
+ - Urban Widmark: use PCI DMA interface (with thanks to the eepro100.c
+ code) update "Theory of Operation" with
+ softnet/locking changes
+ - Dave Miller: PCI DMA and endian fixups
+ - Jeff Garzik: MOD_xxx race fixes, updated PCI resource allocation
+
+ LK1.1.4:
+ - Urban Widmark: fix gcc 2.95.2 problem and
+ remove writel's to fixed address 0x7c
+
+ LK1.1.5:
+ - Urban Widmark: mdio locking, bounce buffer changes
+ merges from Beckers 1.05 version
+ added netif_running_on/off support
+
+ LK1.1.6:
+ - Urban Widmark: merges from Beckers 1.08b version (VT6102 + mdio)
+ set netif_running_on/off on startup, del_timer_sync
+
+ LK1.1.7:
+ - Manfred Spraul: added reset into tx_timeout
+
+ LK1.1.9:
+ - Urban Widmark: merges from Beckers 1.10 version
+ (media selection + eeprom reload)
+ - David Vrabel: merges from D-Link "1.11" version
+ (disable WOL and PME on startup)
+
+ LK1.1.10:
+ - Manfred Spraul: use "singlecopy" for unaligned buffers
+ don't allocate bounce buffers for !ReqTxAlign cards
+
+ LK1.1.11:
+ - David Woodhouse: Set dev->base_addr before the first time we call
+ wait_for_reset(). It's a lot happier that way.
+ Free np->tx_bufs only if we actually allocated it.
+
+ LK1.1.12:
+ - Martin Eriksson: Allow Memory-Mapped IO to be enabled.
+
+ LK1.1.13 (jgarzik):
+ - Add ethtool support
+ - Replace some MII-related magic numbers with constants
+
+ LK1.1.14 (Ivan G.):
+ - fixes comments for Rhine-III
+ - removes W_MAX_TIMEOUT (unused)
+ - adds HasDavicomPhy for Rhine-I (basis: linuxfet driver; my card
+ is R-I and has Davicom chip, flag is referenced in kernel driver)
+ - sends chip_id as a parameter to wait_for_reset since np is not
+ initialized on first call
+ - changes mmio "else if (chip_id==VT6102)" to "else" so it will work
+ for Rhine-III's (documentation says same bit is correct)
+ - transmit frame queue message is off by one - fixed
+ - adds IntrNormalSummary to "Something Wicked" exclusion list
+ so normal interrupts will not trigger the message (src: Donald Becker)
+ (Roger Luethi)
+ - show confused chip where to continue after Tx error
+ - location of collision counter is chip specific
+ - allow selecting backoff algorithm (module parameter)
+
+ LK1.1.15 (jgarzik):
+ - Use new MII lib helper generic_mii_ioctl
+
+ LK1.1.16 (Roger Luethi)
+ - Etherleak fix
+ - Handle Tx buffer underrun
+ - Fix bugs in full duplex handling
+ - New reset code uses "force reset" cmd on Rhine-II
+ - Various clean ups
+
+ LK1.1.17 (Roger Luethi)
+ - Fix race in via_rhine_start_tx()
+ - On errors, wait for Tx engine to turn off before scavenging
+ - Handle Tx descriptor write-back race on Rhine-II
+ - Force flushing for PCI posted writes
+ - More reset code changes
+
+*/
+
+#define DRV_NAME "via-rhine"
+#define DRV_VERSION "1.1.17"
+#define DRV_RELDATE "March-1-2003"
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/crc32.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#undef RX_RING_SIZE
+#undef TX_RING_SIZE
+
+/* A few user-configurable values.
+ These may be modified when a driver module is loaded. */
+
+static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
+static int max_interrupt_work = 20;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature. */
+static int rx_copybreak;
+
+/* Select a backoff algorithm (Ethernet capture effect) */
+static int backoff;
+
+/* Used to pass the media type, etc.
+ Both 'options[]' and 'full_duplex[]' should exist for driver
+ interoperability.
+ The media type is usually passed in 'options[]'.
+ The default is autonegotiation for speed and duplex.
+ This should rarely be overridden.
+ Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
+ Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
+ Use option values 0x20 and 0x200 for forcing full duplex operation.
+*/
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ The Rhine has a 64 element 8390-like hash table. */
+static const int multicast_filter_limit = 32;
+
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for compile efficiency.
+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority.
+ There are no ill effects from too-large receive rings. */
+#define TX_RING_SIZE 16
+#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
+#define RX_RING_SIZE 16
+
+
+/* Operational parameters that usually are not changed. */
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (2*HZ)
+
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
+
+#if !defined(__OPTIMIZE__) || !defined(__KERNEL__)
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+/* These identify the driver base version and may not be removed. */
+static char version[] __devinitdata =
+KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n"
+KERN_INFO " http://www.scyld.com/network/via-rhine.html\n";
+
+static char shortname[] = DRV_NAME;
+
+
+/* This driver was written to use PCI memory space, however most versions
+ of the Rhine only work correctly with I/O space accesses. */
+#ifdef CONFIG_VIA_RHINE_MMIO
+#define USE_MEM
+#else
+#define USE_IO
+#undef readb
+#undef readw
+#undef readl
+#undef writeb
+#undef writew
+#undef writel
+#define readb inb
+#define readw inw
+#define readl inl
+#define writeb outb
+#define writew outw
+#define writel outl
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
+MODULE_LICENSE("GPL");
+
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(debug, "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(backoff, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
+MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
+MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(backoff, "VIA Rhine: Bits 0-3: backoff algorithm");
+MODULE_PARM_DESC(options, "VIA Rhine: Bits 0-3: media type, bit 17: full duplex");
+MODULE_PARM_DESC(full_duplex, "VIA Rhine full duplex setting(s) (1)");
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This driver is designed for the VIA 86c100A Rhine-II PCI Fast Ethernet
+controller.
+
+II. Board-specific settings
+
+Boards with this chip are functional only in a bus-master PCI slot.
+
+Many operational settings are loaded from the EEPROM to the Config word at
+offset 0x78. For most of these settings, this driver assumes that they are
+correct.
+If this driver is compiled to use PCI memory space operations the EEPROM
+must be configured to enable memory ops.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
+
+IIIb/c. Transmit/Receive Structure
+
+This driver attempts to use a zero-copy receive and transmit scheme.
+
+Alas, all data buffers are required to start on a 32 bit boundary, so
+the driver must often copy transmit packets into bounce buffers.
+
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the chip as receive data
+buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack. Buffers consumed this way are replaced by newly allocated
+skbuffs in the last phase of via_rhine_rx().
+
+The RX_COPYBREAK value is chosen to trade-off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames. New boards are typically used in generously configured machines
+and the underfilled buffers have negligible impact compared to the benefit of
+a single allocation size, so the default value of zero results in never
+copying packets. When copying is done, the cost is usually mitigated by using
+a combined copy/checksum routine. Copying also preloads the cache, which is
+most useful with small frames.
+
+Since the VIA chips are only able to transfer data to buffers on 32 bit
+boundaries, the IP header at offset 14 in an ethernet frame isn't
+longword aligned for further processing. Copying these unaligned buffers
+has the beneficial effect of 16-byte aligning the IP header.
+
+IIId. Synchronization
+
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->priv->lock spinlock. The other thread is the interrupt handler, which
+is single threaded by the hardware and interrupt handling software.
+
+The send packet thread has partial control over the Tx ring. It locks the
+dev->priv->lock whenever it's queuing a Tx packet. If the next slot in the ring
+is not available it stops the transmit queue by calling netif_stop_queue.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. If at least half of the entries in
+the Rx ring are available the transmit queue is woken up if it was stopped.
+
+IV. Notes
+
+IVb. References
+
+Preliminary VT86C100A manual from http://www.via.com.tw/
+http://www.scyld.com/expert/100mbps.html
+http://www.scyld.com/expert/NWay.html
+ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
+ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
+
+
+IVc. Errata
+
+The VT86C100A manual is not reliable information.
+The 3043 chip does not handle unaligned transmit or receive buffers, resulting
+in significant performance degradation for bounce buffer copies on transmit
+and unaligned IP headers on receive.
+The chip does not pad to minimum transmit length.
+
+*/
+
+
+/* This table drives the PCI probe routines. It's mostly boilerplate in all
+ of the drivers, and will likely be provided by some future kernel.
+ Note the matching code -- the first table entry matches all 56** cards but
+ the second only the 1234 card.
+*/
+
+enum pci_flags_bit {
+ PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
+ PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
+};
+
+enum via_rhine_chips {
+ VT86C100A = 0,
+ VT6102,
+ VT6105,
+ VT6105M
+};
+
+struct via_rhine_chip_info {
+ const char *name;
+ u16 pci_flags;
+ int io_size;
+ int drv_flags;
+};
+
+
+enum chip_capability_flags {
+ CanHaveMII=1, HasESIPhy=2, HasDavicomPhy=4,
+ ReqTxAlign=0x10, HasWOL=0x20, };
+
+#ifdef USE_MEM
+#define RHINE_IOTYPE (PCI_USES_MEM | PCI_USES_MASTER | PCI_ADDR1)
+#else
+#define RHINE_IOTYPE (PCI_USES_IO | PCI_USES_MASTER | PCI_ADDR0)
+#endif
+/* Beware of PCI posted writes */
+#define IOSYNC do { readb(dev->base_addr + StationAddr); } while (0)
+
+/* directly indexed by enum via_rhine_chips, above */
+static struct via_rhine_chip_info via_rhine_chip_info[] __devinitdata =
+{
+ { "VIA VT86C100A Rhine", RHINE_IOTYPE, 128,
+ CanHaveMII | ReqTxAlign | HasDavicomPhy },
+ { "VIA VT6102 Rhine-II", RHINE_IOTYPE, 256,
+ CanHaveMII | HasWOL },
+ { "VIA VT6105 Rhine-III", RHINE_IOTYPE, 256,
+ CanHaveMII | HasWOL },
+ { "VIA VT6105M Rhine-III", RHINE_IOTYPE, 256,
+ CanHaveMII | HasWOL },
+};
+
+static struct pci_device_id via_rhine_pci_tbl[] __devinitdata =
+{
+ {0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VT86C100A},
+ {0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VT6102},
+ {0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VT6105},
+ {0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VT6105M},
+ {0,} /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, via_rhine_pci_tbl);
+
+
+/* Offsets to the device registers. */
+enum register_offsets {
+ StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
+ IntrStatus=0x0C, IntrEnable=0x0E,
+ MulticastFilter0=0x10, MulticastFilter1=0x14,
+ RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
+ MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
+ MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
+ ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
+ RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
+ StickyHW=0x83, IntrStatus2=0x84, WOLcrClr=0xA4, WOLcgClr=0xA7,
+ PwrcsrClr=0xAC,
+};
+
+/* Bits in ConfigD */
+enum backoff_bits {
+ BackOptional=0x01, BackModify=0x02,
+ BackCaptureEffect=0x04, BackRandom=0x08
+};
+
+#ifdef USE_MEM
+/* Registers we check that mmio and reg are the same. */
+int mmio_verify_registers[] = {
+ RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
+ 0
+};
+#endif
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+ IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
+ IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
+ IntrPCIErr=0x0040,
+ IntrStatsMax=0x0080, IntrRxEarly=0x0100,
+ IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
+ IntrTxAborted=0x2000, IntrLinkChange=0x4000,
+ IntrRxWakeUp=0x8000,
+ IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
+ IntrTxDescRace=0x080000, /* mapped from IntrStatus2 */
+ IntrTxErrSummary=0x082210,
+};
+
+/* The Rx and Tx buffer descriptors.
+   Layout is fixed by the hardware: four 32-bit words per descriptor.
+   The ring setup code stores these words little-endian (cpu_to_le32)
+   before handing ownership to the chip. */
+struct rx_desc {
+ s32 rx_status;
+ u32 desc_length; /* Chain flag, Buffer/frame length */
+ u32 addr;
+ u32 next_desc;
+};
+struct tx_desc {
+ s32 tx_status;
+ u32 desc_length; /* Chain flag, Tx Config, Frame length */
+ u32 addr;
+ u32 next_desc;
+};
+
+/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
+#define TXDESC 0x00e08000
+
+enum rx_status_bits {
+ RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
+};
+
+/* Bits in *_desc.*_status */
+enum desc_status_bits {
+ DescOwn=0x80000000
+};
+
+/* Bits in ChipCmd. */
+enum chip_cmd_bits {
+ CmdInit=0x0001, CmdStart=0x0002, CmdStop=0x0004, CmdRxOn=0x0008,
+ CmdTxOn=0x0010, CmdTxDemand=0x0020, CmdRxDemand=0x0040,
+ CmdEarlyRx=0x0100, CmdEarlyTx=0x0200, CmdFDuplex=0x0400,
+ CmdNoTxPoll=0x0800, CmdReset=0x8000,
+};
+
+#define MAX_MII_CNT 4
+/* Per-device driver state, hung off dev->priv: the descriptor rings and
+   their DMA addresses, the skbuffs mapped into each ring slot, optional
+   Tx bounce buffers, the media-monitor timer, and MII bookkeeping. */
+struct netdev_private {
+ /* Descriptor rings */
+ struct rx_desc *rx_ring;
+ struct tx_desc *tx_ring;
+ dma_addr_t rx_ring_dma;
+ dma_addr_t tx_ring_dma;
+
+ /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff *rx_skbuff[RX_RING_SIZE];
+ dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
+
+ /* The saved address of a sent-in-place packet/buffer, for later free(). */
+ struct sk_buff *tx_skbuff[TX_RING_SIZE];
+ dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
+
+ /* Tx bounce buffers (only used on chips with ReqTxAlign) */
+ unsigned char *tx_buf[TX_RING_SIZE];
+ unsigned char *tx_bufs;
+ dma_addr_t tx_bufs_dma;
+
+ struct pci_dev *pdev;
+ struct net_device_stats stats;
+ struct timer_list timer; /* Media monitoring timer. */
+ spinlock_t lock; /* Serializes xmit path vs. interrupt/timer. */
+
+ /* Frequently used values: keep some adjacent for cache effect. */
+ int chip_id, drv_flags;
+ struct rx_desc *rx_head_desc;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int cur_tx, dirty_tx;
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ u16 chip_cmd; /* Current setting for ChipCmd */
+
+ /* These values keep track of the transceiver/media in use. */
+ unsigned int default_port:4; /* Last dev->if_port value. */
+ u8 tx_thresh, rx_thresh;
+
+ /* MII transceiver section. */
+ unsigned char phys[MAX_MII_CNT]; /* MII device addresses. */
+ unsigned int mii_cnt; /* number of MIIs found, but only the first one is used */
+ u16 mii_status; /* last read MII status */
+ struct mii_if_info mii_if;
+};
+
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
+static int via_rhine_open(struct net_device *dev);
+static void via_rhine_check_duplex(struct net_device *dev);
+static void via_rhine_timer(unsigned long data);
+static void via_rhine_tx_timeout(struct net_device *dev);
+static int via_rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
+static void via_rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static void via_rhine_tx(struct net_device *dev);
+static void via_rhine_rx(struct net_device *dev);
+static void via_rhine_error(struct net_device *dev, int intr_status);
+static void via_rhine_set_rx_mode(struct net_device *dev);
+static struct net_device_stats *via_rhine_get_stats(struct net_device *dev);
+#if 0
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+#endif
+static int via_rhine_close(struct net_device *dev);
+
+/* Return the 16-bit interrupt status; on the VT6102 (Rhine-II) also
+   fold IntrStatus2 into bits 16-23 so callers see the Tx descriptor
+   write-back race bit (IntrTxDescRace) in the same word. */
+static inline u32 get_intr_status(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = dev->priv;
+ u32 intr_status;
+
+ intr_status = readw(ioaddr + IntrStatus);
+ /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
+ if (np->chip_id == VT6102)
+ intr_status |= readb(ioaddr + IntrStatus2) << 16;
+ return intr_status;
+}
+
+/* Poll until the chip's CmdReset bit self-clears (bounded busy-wait).
+   chip_id is passed as a parameter rather than read from dev->priv
+   because on the first call from via_rhine_init_one() dev->priv is not
+   yet initialized (see changelog at the top of this file). */
+static void wait_for_reset(struct net_device *dev, int chip_id, char *name)
+{
+ long ioaddr = dev->base_addr;
+ int boguscnt = 20;
+
+ IOSYNC;
+
+ if (readw(ioaddr + ChipCmd) & CmdReset) {
+ printk(KERN_INFO "%s: Reset not complete yet. "
+ "Trying harder.\n", name);
+
+ /* Rhine-II needs to be forced sometimes */
+ if (chip_id == VT6102)
+ writeb(0x40, ioaddr + MiscCmd);
+
+ /* VT86C100A may need long delay after reset (dlink) */
+ /* Seen on Rhine-II as well (rl) */
+ while ((readw(ioaddr + ChipCmd) & CmdReset) && --boguscnt)
+ udelay(5);
+
+ }
+
+ if (debug > 1)
+ printk(KERN_INFO "%s: Reset %s.\n", name,
+ boguscnt ? "succeeded" : "failed");
+}
+
+#ifdef USE_MEM
+/* Enable memory-mapped register access on the chip.  Deliberately uses
+   port I/O (inb/outb): MMIO is not usable until this bit is set.  The
+   enable bit lives in ConfigA on the VT86C100A and ConfigD otherwise. */
+static void __devinit enable_mmio(long ioaddr, int chip_id)
+{
+ int n;
+ if (chip_id == VT86C100A) {
+ /* More recent docs say that this bit is reserved ... */
+ n = inb(ioaddr + ConfigA) | 0x20;
+ outb(n, ioaddr + ConfigA);
+ } else {
+ n = inb(ioaddr + ConfigD) | 0x80;
+ outb(n, ioaddr + ConfigD);
+ }
+}
+#endif
+
+/* Trigger an EEPROM autoload (write 0x20 to MACRegEEcsr) and poll until
+   the chip clears the bit again.  Side effect: the reload overwrites
+   ConfigA-D, so under MMIO the caller must re-run enable_mmio()
+   afterwards (see via_rhine_init_one). */
+static void __devinit reload_eeprom(long ioaddr)
+{
+ int i;
+ outb(0x20, ioaddr + MACRegEEcsr);
+ /* Typically 2 cycles to reload. */
+ for (i = 0; i < 150; i++)
+ if (! (inb(ioaddr + MACRegEEcsr) & 0x20))
+ break;
+}
+
+/* PCI probe: one-time initialization of a single Rhine device.
+   Maps the registers (PIO or MMIO per RHINE_IOTYPE), resets the chip,
+   reloads the station address from EEPROM, fills in the net_device
+   methods and registers it, then scans for MII PHYs.
+   Returns 0 on success, -ENODEV on any failure (all intermediate
+   resources are released via the goto cleanup chain at the bottom). */
+static int __devinit via_rhine_init_one (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev;
+ struct netdev_private *np;
+ int i, option;
+ int chip_id = (int) ent->driver_data;
+ static int card_idx = -1;
+ long ioaddr;
+ long memaddr;
+ int io_size;
+ int pci_flags;
+#ifdef USE_MEM
+ long ioaddr0;
+#endif
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+ static int printed_version;
+ if (!printed_version++)
+ printk(version);
+#endif
+
+ card_idx++;
+ option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+ io_size = via_rhine_chip_info[chip_id].io_size;
+ pci_flags = via_rhine_chip_info[chip_id].pci_flags;
+
+ if (pci_enable_device (pdev))
+ goto err_out;
+
+ /* this should always be supported */
+ if (pci_set_dma_mask(pdev, 0xffffffff)) {
+ printk(KERN_ERR "32-bit PCI DMA addresses not supported by the card!?\n");
+ goto err_out;
+ }
+
+ /* sanity check */
+ if ((pci_resource_len (pdev, 0) < io_size) ||
+ (pci_resource_len (pdev, 1) < io_size)) {
+ printk (KERN_ERR "Insufficient PCI resources, aborting\n");
+ goto err_out;
+ }
+
+ ioaddr = pci_resource_start (pdev, 0);
+ memaddr = pci_resource_start (pdev, 1);
+
+ if (pci_flags & PCI_USES_MASTER)
+ pci_set_master (pdev);
+
+ dev = alloc_etherdev(sizeof(*np));
+ if (dev == NULL) {
+ printk (KERN_ERR "init_ethernet failed for card #%d\n", card_idx);
+ goto err_out;
+ }
+ SET_MODULE_OWNER(dev);
+
+ if (pci_request_regions(pdev, shortname))
+ goto err_out_free_netdev;
+
+#ifdef USE_MEM
+ /* Keep the PIO base around: enable_mmio()/reload_eeprom() need it. */
+ ioaddr0 = ioaddr;
+ enable_mmio(ioaddr0, chip_id);
+
+ ioaddr = (long) ioremap (memaddr, io_size);
+ if (!ioaddr) {
+ printk (KERN_ERR "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
+ pdev->slot_name, io_size, memaddr);
+ goto err_out_free_res;
+ }
+
+ /* Check that selected MMIO registers match the PIO ones */
+ i = 0;
+ while (mmio_verify_registers[i]) {
+ int reg = mmio_verify_registers[i++];
+ unsigned char a = inb(ioaddr0+reg);
+ unsigned char b = readb(ioaddr+reg);
+ if (a != b) {
+ printk (KERN_ERR "MMIO do not match PIO [%02x] (%02x != %02x)\n",
+ reg, a, b);
+ goto err_out_unmap;
+ }
+ }
+#endif
+
+ /* D-Link provided reset code (with comment additions) */
+ if (via_rhine_chip_info[chip_id].drv_flags & HasWOL) {
+ unsigned char byOrgValue;
+
+ /* clear sticky bit before reset & read ethernet address */
+ byOrgValue = readb(ioaddr + StickyHW);
+ byOrgValue = byOrgValue & 0xFC;
+ writeb(byOrgValue, ioaddr + StickyHW);
+
+ /* (bits written are cleared?) */
+ /* disable force PME-enable */
+ writeb(0x80, ioaddr + WOLcgClr);
+ /* disable power-event config bit */
+ writeb(0xFF, ioaddr + WOLcrClr);
+ /* clear power status (undocumented in vt6102 docs?) */
+ writeb(0xFF, ioaddr + PwrcsrClr);
+ }
+
+ /* Reset the chip to erase previous misconfiguration. */
+ writew(CmdReset, ioaddr + ChipCmd);
+
+ dev->base_addr = ioaddr;
+ /* dev->priv is not initialized yet, hence the explicit chip_id. */
+ wait_for_reset(dev, chip_id, shortname);
+
+ /* Reload the station address from the EEPROM. */
+#ifdef USE_IO
+ reload_eeprom(ioaddr);
+#else
+ reload_eeprom(ioaddr0);
+ /* Reloading from eeprom overwrites cfgA-D, so we must re-enable MMIO.
+ If reload_eeprom() was done first this could be avoided, but it is
+ not known if that still works with the "win98-reboot" problem. */
+ enable_mmio(ioaddr0, chip_id);
+#endif
+
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = readb(ioaddr + StationAddr + i);
+
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ printk(KERN_ERR "Invalid MAC address for card #%d\n", card_idx);
+ goto err_out_unmap;
+ }
+
+ if (chip_id == VT6102) {
+ /*
+ * for 3065D, EEPROM reloaded will cause bit 0 in MAC_REG_CFGA
+ * turned on. it makes MAC receive magic packet
+ * automatically. So, we turn it off. (D-Link)
+ */
+ writeb(readb(ioaddr + ConfigA) & 0xFE, ioaddr + ConfigA);
+ }
+
+ /* Select backoff algorithm */
+ if (backoff)
+ writeb(readb(ioaddr + ConfigD) & (0xF0 | backoff),
+ ioaddr + ConfigD);
+
+ dev->irq = pdev->irq;
+
+ np = dev->priv;
+ spin_lock_init (&np->lock);
+ np->chip_id = chip_id;
+ np->drv_flags = via_rhine_chip_info[chip_id].drv_flags;
+ np->pdev = pdev;
+ np->mii_if.dev = dev;
+ np->mii_if.mdio_read = mdio_read;
+ np->mii_if.mdio_write = mdio_write;
+ np->mii_if.phy_id_mask = 0x1f;
+ np->mii_if.reg_num_mask = 0x1f;
+
+ if (dev->mem_start)
+ option = dev->mem_start;
+
+ /* The chip-specific entries in the device structure. */
+ dev->open = via_rhine_open;
+ dev->hard_start_xmit = via_rhine_start_tx;
+ dev->stop = via_rhine_close;
+ dev->get_stats = via_rhine_get_stats;
+ dev->set_multicast_list = via_rhine_set_rx_mode;
+#if 0
+ dev->do_ioctl = netdev_ioctl;
+#endif
+ dev->tx_timeout = via_rhine_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+#if 0
+ if (np->drv_flags & ReqTxAlign)
+#endif
+ dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
+
+ /* dev->name not defined before register_netdev()! */
+ i = register_netdev(dev);
+ if (i)
+ goto err_out_unmap;
+
+ /* The lower four bits are the media type. */
+ if (option > 0) {
+ if (option & 0x220)
+ np->mii_if.full_duplex = 1;
+ np->default_port = option & 15;
+ }
+ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
+ np->mii_if.full_duplex = 1;
+
+ if (np->mii_if.full_duplex) {
+ printk(KERN_INFO "%s: Set to forced full duplex, autonegotiation"
+ " disabled.\n", dev->name);
+ np->mii_if.force_media = 1;
+ }
+
+ printk(KERN_INFO "%s: %s at 0x%lx, ",
+ dev->name, via_rhine_chip_info[chip_id].name,
+ (pci_flags & PCI_USES_IO) ? ioaddr : memaddr);
+
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], pdev->irq);
+
+ pci_set_drvdata(pdev, dev);
+
+ if (np->drv_flags & CanHaveMII) {
+ int phy, phy_idx = 0;
+ np->phys[0] = 1; /* Standard for this chip. */
+ for (phy = 1; phy < 32 && phy_idx < MAX_MII_CNT; phy++) {
+ int mii_status = mdio_read(dev, phy, 1);
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ np->phys[phy_idx++] = phy;
+ np->mii_if.advertising = mdio_read(dev, phy, 4);
+ printk(KERN_INFO "%s: MII PHY found at address %d, status "
+ "0x%4.4x advertising %4.4x Link %4.4x.\n",
+ dev->name, phy, mii_status, np->mii_if.advertising,
+ mdio_read(dev, phy, 5));
+
+ /* set IFF_RUNNING */
+ if (mii_status & BMSR_LSTATUS)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+ }
+ }
+ np->mii_cnt = phy_idx;
+ np->mii_if.phy_id = np->phys[0];
+ }
+
+ /* Allow forcing the media type. */
+ if (option > 0) {
+ if (option & 0x220)
+ np->mii_if.full_duplex = 1;
+ np->default_port = option & 0x3ff;
+ if (np->default_port & 0x330) {
+ /* FIXME: shouldn't someone check this variable? */
+ /* np->medialock = 1; */
+ printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
+ (option & 0x300 ? 100 : 10),
+ (option & 0x220 ? "full" : "half"));
+ if (np->mii_cnt)
+ mdio_write(dev, np->phys[0], MII_BMCR,
+ ((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
+ ((option & 0x220) ? 0x0100 : 0)); /* Full duplex? */
+ }
+ }
+
+ /* Xen addition: flag this NIC as a slow device that incurs extra
+    copying overhead within Xen. */
+ alert_slow_netdevice(dev, (char *)via_rhine_chip_info[chip_id].name);
+
+ return 0;
+
+err_out_unmap:
+#ifdef USE_MEM
+ iounmap((void *)ioaddr);
+err_out_free_res:
+#endif
+ pci_release_regions(pdev);
+err_out_free_netdev:
+ kfree (dev);
+err_out:
+ return -ENODEV;
+}
+
+/* Allocate one DMA-consistent region holding both descriptor rings
+   (Rx ring first, Tx ring immediately after it) and, on chips that
+   require aligned Tx buffers (ReqTxAlign), a block of Tx bounce
+   buffers.  Returns 0 or -ENOMEM; if the bounce-buffer allocation
+   fails the ring memory is freed again before returning. */
+static int alloc_ring(struct net_device* dev)
+{
+ struct netdev_private *np = dev->priv;
+ void *ring;
+ dma_addr_t ring_dma;
+
+ ring = pci_alloc_consistent(np->pdev,
+ RX_RING_SIZE * sizeof(struct rx_desc) +
+ TX_RING_SIZE * sizeof(struct tx_desc),
+ &ring_dma);
+ if (!ring) {
+ printk(KERN_ERR "Could not allocate DMA memory.\n");
+ return -ENOMEM;
+ }
+ if (np->drv_flags & ReqTxAlign) {
+ np->tx_bufs = pci_alloc_consistent(np->pdev, PKT_BUF_SZ * TX_RING_SIZE,
+ &np->tx_bufs_dma);
+ if (np->tx_bufs == NULL) {
+ pci_free_consistent(np->pdev,
+ RX_RING_SIZE * sizeof(struct rx_desc) +
+ TX_RING_SIZE * sizeof(struct tx_desc),
+ ring, ring_dma);
+ return -ENOMEM;
+ }
+ }
+
+ np->rx_ring = ring;
+ np->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
+ np->rx_ring_dma = ring_dma;
+ np->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
+
+ return 0;
+}
+
+/* Release the combined Rx/Tx descriptor ring region allocated by
+   alloc_ring(), plus the Tx bounce buffers if they were allocated.
+   Clears tx_ring/tx_bufs so stale references are detectable. */
+void free_ring(struct net_device* dev)
+{
+ struct netdev_private *np = dev->priv;
+
+ pci_free_consistent(np->pdev,
+ RX_RING_SIZE * sizeof(struct rx_desc) +
+ TX_RING_SIZE * sizeof(struct tx_desc),
+ np->rx_ring, np->rx_ring_dma);
+ np->tx_ring = NULL;
+
+ if (np->tx_bufs)
+ pci_free_consistent(np->pdev, PKT_BUF_SZ * TX_RING_SIZE,
+ np->tx_bufs, np->tx_bufs_dma);
+
+ np->tx_bufs = NULL;
+
+}
+
+/* Build the Rx ring: link the descriptors into a circular chain, then
+   allocate and DMA-map one full-size skbuff per slot, handing ownership
+   of each filled slot to the chip (DescOwn).  skbuff allocation failure
+   is tolerated: the loop stops early and dirty_rx records the deficit. */
+static void alloc_rbufs(struct net_device *dev)
+{
+ struct netdev_private *np = dev->priv;
+ dma_addr_t next;
+ int i;
+
+ np->dirty_rx = np->cur_rx = 0;
+
+ np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
+ np->rx_head_desc = &np->rx_ring[0];
+ next = np->rx_ring_dma;
+
+ /* Init the ring entries */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].rx_status = 0;
+ np->rx_ring[i].desc_length = cpu_to_le32(np->rx_buf_sz);
+ next += sizeof(struct rx_desc);
+ np->rx_ring[i].next_desc = cpu_to_le32(next);
+ np->rx_skbuff[i] = 0;
+ }
+ /* Mark the last entry as wrapping the ring. */
+ np->rx_ring[i-1].next_desc = cpu_to_le32(np->rx_ring_dma);
+
+ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+
+ np->rx_skbuff_dma[i] =
+ pci_map_single(np->pdev, skb->tail, np->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+
+ np->rx_ring[i].addr = cpu_to_le32(np->rx_skbuff_dma[i]);
+ np->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
+ }
+ np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+}
+
+/* Unmap and free every skbuff in the Rx ring, clearing each slot and
+   poisoning the descriptor address so a late DMA write would hit an
+   obviously invalid address. */
+static void free_rbufs(struct net_device* dev)
+{
+ struct netdev_private *np = dev->priv;
+ int i;
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].rx_status = 0;
+ np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
+ if (np->rx_skbuff[i]) {
+ pci_unmap_single(np->pdev,
+ np->rx_skbuff_dma[i],
+ np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(np->rx_skbuff[i]);
+ }
+ np->rx_skbuff[i] = 0;
+ }
+}
+
+/* Build the Tx ring: clear each descriptor, link them into a circular
+   chain and point each slot's tx_buf at its bounce-buffer area.
+   NOTE(review): np->tx_bufs is NULL on chips without ReqTxAlign, so
+   tx_buf[] then holds offsets from NULL -- presumably never
+   dereferenced on those chips; confirm against via_rhine_start_tx. */
+static void alloc_tbufs(struct net_device* dev)
+{
+ struct netdev_private *np = dev->priv;
+ dma_addr_t next;
+ int i;
+
+ np->dirty_tx = np->cur_tx = 0;
+ next = np->tx_ring_dma;
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_skbuff[i] = 0;
+ np->tx_ring[i].tx_status = 0;
+ np->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
+ next += sizeof(struct tx_desc);
+ np->tx_ring[i].next_desc = cpu_to_le32(next);
+ np->tx_buf[i] = &np->tx_bufs[i * PKT_BUF_SZ];
+ }
+ np->tx_ring[i-1].next_desc = cpu_to_le32(np->tx_ring_dma);
+
+}
+
+/* Reset every Tx descriptor and free any skbuffs still queued in the
+   ring, unmapping their DMA addresses first.  A zero tx_skbuff_dma
+   entry means the packet went out of a bounce buffer and needs no
+   unmap. */
+static void free_tbufs(struct net_device* dev)
+{
+ struct netdev_private *np = dev->priv;
+ int i;
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_ring[i].tx_status = 0;
+ np->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
+ np->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
+ if (np->tx_skbuff[i]) {
+ if (np->tx_skbuff_dma[i]) {
+ pci_unmap_single(np->pdev,
+ np->tx_skbuff_dma[i],
+ np->tx_skbuff[i]->len, PCI_DMA_TODEVICE);
+ }
+ dev_kfree_skb(np->tx_skbuff[i]);
+ }
+ np->tx_skbuff[i] = 0;
+ np->tx_buf[i] = 0;
+ }
+}
+
+/* Program the chip's operational registers after a reset: station
+   address, PCI/FIFO tuning, ring base pointers, rx mode, interrupt
+   mask, and the initial ChipCmd.  Finishes by configuring the MII
+   transceiver's LED outputs and rechecking duplex. */
+static void init_registers(struct net_device *dev)
+{
+ struct netdev_private *np = dev->priv;
+ long ioaddr = dev->base_addr;
+ int i;
+
+ for (i = 0; i < 6; i++)
+ writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
+
+ /* Initialize other registers. */
+ writew(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */
+ /* Configure initial FIFO thresholds. */
+ writeb(0x20, ioaddr + TxConfig);
+ np->tx_thresh = 0x20;
+ np->rx_thresh = 0x60; /* Written in via_rhine_set_rx_mode(). */
+ np->mii_if.full_duplex = 0;
+
+ if (dev->if_port == 0)
+ dev->if_port = np->default_port;
+
+ writel(np->rx_ring_dma, ioaddr + RxRingPtr);
+ writel(np->tx_ring_dma, ioaddr + TxRingPtr);
+
+ via_rhine_set_rx_mode(dev);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ writew(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
+ IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
+ IntrTxDone | IntrTxError | IntrTxUnderrun |
+ IntrPCIErr | IntrStatsMax | IntrLinkChange,
+ ioaddr + IntrEnable);
+
+ np->chip_cmd = CmdStart|CmdTxOn|CmdRxOn|CmdNoTxPoll;
+ if (np->mii_if.force_media)
+ np->chip_cmd |= CmdFDuplex;
+ writew(np->chip_cmd, ioaddr + ChipCmd);
+
+ via_rhine_check_duplex(dev);
+
+ /* The LED outputs of various MII xcvrs should be configured. */
+ /* For NS or Mison phys, turn on bit 1 in register 0x17 */
+ /* For ESI phys, turn on bit 7 in register 0x17. */
+ /* NB: the conditional must be parenthesized -- '|' binds tighter
+    than '?:', so without parens the OR result became the ternary's
+    condition and a bare constant was written instead of OR-ing one
+    bit into the read-back value. */
+ mdio_write(dev, np->phys[0], 0x17, mdio_read(dev, np->phys[0], 0x17) |
+ ((np->drv_flags & HasESIPhy) ? 0x0080 : 0x0001));
+}
+/* Read and write over the MII Management Data I/O (MDIO) interface. */
+
+/* Read one MII register through the chip's MDIO engine: wait (bounded
+   busy-wait) for any previous command to finish, program the PHY and
+   register addresses, trigger the read (0x40) and poll until the chip
+   clears the trigger bit, then return the 16-bit result. */
+static int mdio_read(struct net_device *dev, int phy_id, int regnum)
+{
+ long ioaddr = dev->base_addr;
+ int boguscnt = 1024;
+
+ /* Wait for a previous command to complete. */
+ while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0)
+ ;
+ writeb(0x00, ioaddr + MIICmd);
+ writeb(phy_id, ioaddr + MIIPhyAddr);
+ writeb(regnum, ioaddr + MIIRegAddr);
+ writeb(0x40, ioaddr + MIICmd); /* Trigger read */
+ boguscnt = 1024;
+ while ((readb(ioaddr + MIICmd) & 0x40) && --boguscnt > 0)
+ ;
+ return readw(ioaddr + MIIData);
+}
+
+/* Write one MII register through the chip's MDIO engine.  For writes
+   to the primary PHY (np->phys[0]) the driver's shadow media state
+   (force_media / full_duplex / advertising) is updated first so it
+   stays consistent with what the PHY is told. */
+static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
+{
+ struct netdev_private *np = dev->priv;
+ long ioaddr = dev->base_addr;
+ int boguscnt = 1024;
+
+ if (phy_id == np->phys[0]) {
+ switch (regnum) {
+ case MII_BMCR: /* Is user forcing speed/duplex? */
+ if (value & 0x9000) /* Autonegotiation. */
+ np->mii_if.force_media = 0;
+ else
+ np->mii_if.full_duplex = (value & 0x0100) ? 1 : 0;
+ break;
+ case MII_ADVERTISE:
+ np->mii_if.advertising = value;
+ break;
+ }
+ }
+
+ /* Wait for a previous command to complete. */
+ while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0)
+ ;
+ writeb(0x00, ioaddr + MIICmd);
+ writeb(phy_id, ioaddr + MIIPhyAddr);
+ writeb(regnum, ioaddr + MIIRegAddr);
+ writew(value, ioaddr + MIIData);
+ writeb(0x20, ioaddr + MIICmd); /* Trigger write. */
+}
+
+
+/* dev->open: reset the chip, claim the IRQ, allocate and populate the
+   Rx/Tx descriptor rings, program the registers, start the Tx queue
+   and arm the media-monitor timer.  Returns 0 on success or a -errno;
+   on ring-allocation failure the just-claimed IRQ is released again so
+   the error path leaks nothing. */
+static int via_rhine_open(struct net_device *dev)
+{
+ struct netdev_private *np = dev->priv;
+ long ioaddr = dev->base_addr;
+ int i;
+
+ /* Reset the chip. */
+ writew(CmdReset, ioaddr + ChipCmd);
+
+ i = request_irq(np->pdev->irq, &via_rhine_interrupt, SA_SHIRQ, dev->name, dev);
+ if (i)
+ return i;
+
+ if (debug > 1)
+ printk(KERN_DEBUG "%s: via_rhine_open() irq %d.\n",
+ dev->name, np->pdev->irq);
+
+ i = alloc_ring(dev);
+ if (i) {
+ /* Don't leak the IRQ we just claimed. */
+ free_irq(np->pdev->irq, dev);
+ return i;
+ }
+ alloc_rbufs(dev);
+ alloc_tbufs(dev);
+ wait_for_reset(dev, np->chip_id, dev->name);
+ init_registers(dev);
+ if (debug > 2)
+ printk(KERN_DEBUG "%s: Done via_rhine_open(), status %4.4x "
+ "MII status: %4.4x.\n",
+ dev->name, readw(ioaddr + ChipCmd),
+ mdio_read(dev, np->phys[0], MII_BMSR));
+
+ netif_start_queue(dev);
+
+ /* Set the timer to check for link beat. */
+ init_timer(&np->timer);
+ np->timer.expires = jiffies + 2;
+ np->timer.data = (unsigned long)dev;
+ np->timer.function = &via_rhine_timer; /* timer handler */
+ add_timer(&np->timer);
+
+ return 0;
+}
+
+/* Re-read the link partner's advertised abilities and, if the
+   negotiated duplex changed, flip CmdFDuplex in ChipCmd to match.
+   No-op when the user forced the media or when the PHY returns 0xffff
+   (no link partner response). */
+static void via_rhine_check_duplex(struct net_device *dev)
+{
+ struct netdev_private *np = dev->priv;
+ long ioaddr = dev->base_addr;
+ int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
+ int negotiated = mii_lpa & np->mii_if.advertising;
+ int duplex;
+
+ if (np->mii_if.force_media || mii_lpa == 0xffff)
+ return;
+ /* Full duplex if 100baseTx-FD negotiated, or 10baseT-FD is the
+    only 10/100 ability both ends share. */
+ duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
+ if (np->mii_if.full_duplex != duplex) {
+ np->mii_if.full_duplex = duplex;
+ if (debug)
+ printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
+ " partner capability of %4.4x.\n", dev->name,
+ duplex ? "full" : "half", np->phys[0], mii_lpa);
+ if (duplex)
+ np->chip_cmd |= CmdFDuplex;
+ else
+ np->chip_cmd &= ~CmdFDuplex;
+ writew(np->chip_cmd, ioaddr + ChipCmd);
+ }
+}
+
+
+/* Periodic media monitor (rearms itself every 10s): rechecks duplex
+   and mirrors the MII "link established" bit into the netdev carrier
+   state.  Takes np->lock to serialize against the transmit path. */
+static void via_rhine_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct netdev_private *np = dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 10*HZ;
+ int mii_status;
+
+ if (debug > 3) {
+ printk(KERN_DEBUG "%s: VIA Rhine monitor tick, status %4.4x.\n",
+ dev->name, readw(ioaddr + IntrStatus));
+ }
+
+ spin_lock_irq (&np->lock);
+
+ via_rhine_check_duplex(dev);
+
+ /* make IFF_RUNNING follow the MII status bit "Link established" */
+ mii_status = mdio_read(dev, np->phys[0], MII_BMSR);
+ if ( (mii_status & BMSR_LSTATUS) != (np->mii_status & BMSR_LSTATUS) ) {
+ if (mii_status & BMSR_LSTATUS)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+ }
+ np->mii_status = mii_status;
+
+ spin_unlock_irq (&np->lock);
+
+ np->timer.expires = jiffies + next_tick;
+ add_timer(&np->timer);
+}
+
+
+/* Transmit watchdog.  Resets the chip, rebuilds both descriptor rings
+ * and reprograms the registers, then restarts the queue.  The device IRQ
+ * is disabled and np->lock held while the rings are rebuilt so the Rx
+ * interrupt path cannot touch them. */
+static void via_rhine_tx_timeout (struct net_device *dev)
+{
+ struct netdev_private *np = dev->priv;
+ long ioaddr = dev->base_addr;
+
+ printk (KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
+ "%4.4x, resetting...\n",
+ dev->name, readw (ioaddr + IntrStatus),
+ mdio_read (dev, np->phys[0], MII_BMSR));
+
+ dev->if_port = 0;
+
+ /* protect against concurrent rx interrupts */
+ disable_irq(np->pdev->irq);
+
+ spin_lock(&np->lock);
+
+ /* Reset the chip. */
+ writew(CmdReset, ioaddr + ChipCmd);
+
+ /* clear all descriptors */
+ free_tbufs(dev);
+ free_rbufs(dev);
+ alloc_tbufs(dev);
+ alloc_rbufs(dev);
+
+ /* Reinitialize the hardware. */
+ wait_for_reset(dev, np->chip_id, dev->name);
+ init_registers(dev);
+
+ spin_unlock(&np->lock);
+ enable_irq(np->pdev->irq);
+
+ dev->trans_start = jiffies;
+ np->stats.tx_errors++;
+ netif_wake_queue(dev);
+}
+
+/* Queue one skb for transmission.  In this port the zero-copy DMA path
+ * is compiled out (#if 0), so every frame is copied into the driver's
+ * aligned bounce buffer; frames longer than PKT_BUF_SZ are dropped.
+ * Always returns 0 (the skb is consumed either way). */
+static int via_rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdev_private *np = dev->priv;
+ unsigned entry;
+ u32 intr_status;
+
+ /* Caution: the write order is important here, set the field
+ with the "ownership" bits last. */
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = np->cur_tx % TX_RING_SIZE;
+
+ /* Runt frames: pre-zero the bounce buffer so the padding up to
+  * ETH_ZLEN does not leak stale data. */
+ if (skb->len < ETH_ZLEN) {
+#if 0
+ skb = skb_padto(skb, ETH_ZLEN);
+ if(skb == NULL)
+ return 0;
+#else
+ memset(np->tx_buf[entry], 0, ETH_ZLEN);
+#endif
+ }
+
+ np->tx_skbuff[entry] = skb;
+
+#if 0
+ if ((np->drv_flags & ReqTxAlign) &&
+ (((long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_HW)
+ ) {
+#endif
+ /* Must use alignment buffer. */
+ if (skb->len > PKT_BUF_SZ) {
+ /* packet too long, drop it */
+ dev_kfree_skb(skb);
+ np->tx_skbuff[entry] = NULL;
+ np->stats.tx_dropped++;
+ return 0;
+ }
+#if 0
+ skb_copy_and_csum_dev(skb, np->tx_buf[entry]);
+#else
+ skb_copy_bits(skb, 0, np->tx_buf[entry], skb->len);
+#endif
+ /* tx_skbuff_dma == 0 tells the reclaim path there is no mapping
+  * to unmap for this slot. */
+ np->tx_skbuff_dma[entry] = 0;
+ np->tx_ring[entry].addr = cpu_to_le32(np->tx_bufs_dma +
+ (np->tx_buf[entry] - np->tx_bufs));
+#if 0
+ } else {
+ np->tx_skbuff_dma[entry] =
+ pci_map_single(np->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
+ np->tx_ring[entry].addr = cpu_to_le32(np->tx_skbuff_dma[entry]);
+ }
+#endif
+
+ np->tx_ring[entry].desc_length =
+ cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
+
+ /* lock eth irq */
+ spin_lock_irq (&np->lock);
+ wmb();
+ np->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
+ wmb();
+
+ np->cur_tx++;
+
+ /* Non-x86 Todo: explicitly flush cache lines here. */
+
+ /*
+ * Wake the potentially-idle transmit channel unless errors are
+ * pending (the ISR must sort them out first).
+ */
+ intr_status = get_intr_status(dev);
+ if ((intr_status & IntrTxErrSummary) == 0) {
+ writew(CmdTxDemand | np->chip_cmd, dev->base_addr + ChipCmd);
+ }
+ IOSYNC;
+
+ if (np->cur_tx == np->dirty_tx + TX_QUEUE_LEN)
+ netif_stop_queue(dev);
+
+ dev->trans_start = jiffies;
+
+ spin_unlock_irq (&np->lock);
+
+ if (debug > 4) {
+ printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
+ dev->name, np->cur_tx-1, entry);
+ }
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static void via_rhine_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
+{
+ struct net_device *dev = dev_instance;
+ long ioaddr;
+ u32 intr_status;
+ int boguscnt = max_interrupt_work;
+
+ ioaddr = dev->base_addr;
+
+ /* Service events until the status register reads clean, bounded by
+  * the max_interrupt_work budget. */
+ while ((intr_status = get_intr_status(dev))) {
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ if (intr_status & IntrTxDescRace)
+ writeb(0x08, ioaddr + IntrStatus2);
+ writew(intr_status & 0xffff, ioaddr + IntrStatus);
+ IOSYNC;
+
+ if (debug > 4)
+ printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
+ dev->name, intr_status);
+
+ if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
+ IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf))
+ via_rhine_rx(dev);
+
+ if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
+ if (intr_status & IntrTxErrSummary) {
+ int cnt = 20;
+ /* Avoid scavenging before Tx engine turned off */
+ while ((readw(ioaddr+ChipCmd) & CmdTxOn) && --cnt)
+ udelay(5);
+ if (debug > 2 && !cnt)
+ printk(KERN_WARNING "%s: via_rhine_interrupt() "
+ "Tx engine still on.\n",
+ dev->name);
+ }
+ via_rhine_tx(dev);
+ }
+
+ /* Abnormal error summary/uncommon events handlers. */
+ if (intr_status & (IntrPCIErr | IntrLinkChange |
+ IntrStatsMax | IntrTxError | IntrTxAborted |
+ IntrTxUnderrun | IntrTxDescRace))
+ via_rhine_error(dev, intr_status);
+
+ if (--boguscnt < 0) {
+ printk(KERN_WARNING "%s: Too much work at interrupt, "
+ "status=%#8.8x.\n",
+ dev->name, intr_status);
+ break;
+ }
+ }
+
+ if (debug > 3)
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n",
+ dev->name, readw(ioaddr + IntrStatus));
+}
+
+/* This routine is logically part of the interrupt handler, but isolated
+ for clarity.  Reclaims completed Tx descriptors: accounts errors and
+ collisions, unmaps/frees the skbs, and wakes the queue once enough
+ ring slots are free. */
+static void via_rhine_tx(struct net_device *dev)
+{
+ struct netdev_private *np = dev->priv;
+ int txstatus = 0, entry = np->dirty_tx % TX_RING_SIZE;
+
+ spin_lock (&np->lock);
+
+ /* find and cleanup dirty tx descriptors */
+ while (np->dirty_tx != np->cur_tx) {
+ txstatus = le32_to_cpu(np->tx_ring[entry].tx_status);
+ if (debug > 6)
+ printk(KERN_DEBUG " Tx scavenge %d status %8.8x.\n",
+ entry, txstatus);
+ if (txstatus & DescOwn)
+ break;
+ if (txstatus & 0x8000) {
+ if (debug > 1)
+ printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
+ dev->name, txstatus);
+ np->stats.tx_errors++;
+ if (txstatus & 0x0400) np->stats.tx_carrier_errors++;
+ if (txstatus & 0x0200) np->stats.tx_window_errors++;
+ if (txstatus & 0x0100) np->stats.tx_aborted_errors++;
+ if (txstatus & 0x0080) np->stats.tx_heartbeat_errors++;
+ /* FIFO error: hand the descriptor back to the chip and retry
+  * the same frame after the restart. */
+ if (((np->chip_id == VT86C100A) && txstatus & 0x0002) ||
+ (txstatus & 0x0800) || (txstatus & 0x1000)) {
+ np->stats.tx_fifo_errors++;
+ np->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
+ break; /* Keep the skb - we try again */
+ }
+ /* Transmitter restarted in 'abnormal' handler. */
+ } else {
+ /* The collision count lives in different bits on the VT86C100A. */
+ if (np->chip_id == VT86C100A)
+ np->stats.collisions += (txstatus >> 3) & 0x0F;
+ else
+ np->stats.collisions += txstatus & 0x0F;
+ if (debug > 6)
+ printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n",
+ (txstatus >> 3) & 0xF,
+ txstatus & 0xF);
+ np->stats.tx_bytes += np->tx_skbuff[entry]->len;
+ np->stats.tx_packets++;
+ }
+ /* Free the original skb. */
+ if (np->tx_skbuff_dma[entry]) {
+ pci_unmap_single(np->pdev,
+ np->tx_skbuff_dma[entry],
+ np->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
+ }
+ dev_kfree_skb_irq(np->tx_skbuff[entry]);
+ np->tx_skbuff[entry] = NULL;
+ entry = (++np->dirty_tx) % TX_RING_SIZE;
+ }
+ if ((np->cur_tx - np->dirty_tx) < TX_QUEUE_LEN - 4)
+ netif_wake_queue (dev);
+
+ spin_unlock (&np->lock);
+}
+
+/* This routine is logically part of the interrupt handler, but isolated
+ for clarity and better register allocation.  Drains completed Rx
+ descriptors (copying small packets into fresh skbs, passing large ones
+ up directly), refills the ring, and kicks the Rx engine. */
+static void via_rhine_rx(struct net_device *dev)
+{
+ struct netdev_private *np = dev->priv;
+ int entry = np->cur_rx % RX_RING_SIZE;
+ int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
+
+ if (debug > 4) {
+ printk(KERN_DEBUG "%s: via_rhine_rx(), entry %d status %8.8x.\n",
+ dev->name, entry, le32_to_cpu(np->rx_head_desc->rx_status));
+ }
+
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+ while ( ! (np->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) {
+ struct rx_desc *desc = np->rx_head_desc;
+ u32 desc_status = le32_to_cpu(desc->rx_status);
+ /* Frame length (including CRC) lives in the upper 16 bits. */
+ int data_size = desc_status >> 16;
+
+ if (debug > 4)
+ printk(KERN_DEBUG " via_rhine_rx() status is %8.8x.\n",
+ desc_status);
+ if (--boguscnt < 0)
+ break;
+ if ( (desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
+ if ((desc_status & RxWholePkt) != RxWholePkt) {
+ printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
+ "multiple buffers, entry %#x length %d status %8.8x!\n",
+ dev->name, entry, data_size, desc_status);
+ printk(KERN_WARNING "%s: Oversized Ethernet frame %p vs %p.\n",
+ dev->name, np->rx_head_desc, &np->rx_ring[entry]);
+ np->stats.rx_length_errors++;
+ } else if (desc_status & RxErr) {
+ /* There was a error. */
+ if (debug > 2)
+ printk(KERN_DEBUG " via_rhine_rx() Rx error was %8.8x.\n",
+ desc_status);
+ np->stats.rx_errors++;
+ if (desc_status & 0x0030) np->stats.rx_length_errors++;
+ if (desc_status & 0x0048) np->stats.rx_fifo_errors++;
+ if (desc_status & 0x0004) np->stats.rx_frame_errors++;
+ if (desc_status & 0x0002) {
+ /* this can also be updated outside the interrupt handler */
+ spin_lock (&np->lock);
+ np->stats.rx_crc_errors++;
+ spin_unlock (&np->lock);
+ }
+ }
+ } else {
+ struct sk_buff *skb;
+ /* Length should omit the CRC */
+ int pkt_len = data_size - 4;
+
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < rx_copybreak &&
+ (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ pci_dma_sync_single(np->pdev, np->rx_skbuff_dma[entry],
+ np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+
+ /* *_IP_COPYSUM isn't defined anywhere and eth_copy_and_sum
+ is memcpy for all archs so this is kind of pointless right
+ now ... or? */
+#if HAS_IP_COPYSUM /* Call copy + cksum if available. */
+ eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
+ skb_put(skb, pkt_len);
+#else
+ memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
+ pkt_len);
+#endif
+ } else {
+ /* Large packet: hand the ring skb up and leave the slot
+  * empty for the refill loop below. */
+ skb = np->rx_skbuff[entry];
+ if (skb == NULL) {
+ printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
+ dev->name);
+ break;
+ }
+ np->rx_skbuff[entry] = NULL;
+ skb_put(skb, pkt_len);
+ pci_unmap_single(np->pdev, np->rx_skbuff_dma[entry],
+ np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ np->stats.rx_bytes += pkt_len;
+ np->stats.rx_packets++;
+ }
+ entry = (++np->cur_rx) % RX_RING_SIZE;
+ np->rx_head_desc = &np->rx_ring[entry];
+ }
+
+ /* Refill the Rx ring buffers. */
+ for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
+ struct sk_buff *skb;
+ entry = np->dirty_rx % RX_RING_SIZE;
+ if (np->rx_skbuff[entry] == NULL) {
+ skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_skbuff[entry] = skb;
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ skb->dev = dev; /* Mark as being used by this device. */
+ np->rx_skbuff_dma[entry] =
+ pci_map_single(np->pdev, skb->tail, np->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ np->rx_ring[entry].addr = cpu_to_le32(np->rx_skbuff_dma[entry]);
+ }
+ np->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
+ }
+
+ /* Pre-emptively restart Rx engine. */
+ writew(readw(dev->base_addr + ChipCmd) | CmdRxOn | CmdRxDemand,
+ dev->base_addr + ChipCmd);
+}
+
+/* Clears the "tally counters" for CRC errors and missed frames(?).
+ It has been reported that some chips need a write of 0 to clear
+ these, for others the counters are set to 1 when written to and
+ instead cleared when read. So we clear them both ways ... */
+static inline void clear_tally_counters(const long ioaddr)
+{
+ writel(0, ioaddr + RxMissed); /* write-to-clear variant */
+ readw(ioaddr + RxCRCErrs); /* read-to-clear variants */
+ readw(ioaddr + RxMissed);
+}
+
+/* Restart the Tx engine after an abort/underrun/descriptor race by
+ * pointing the chip at the first not-yet-reclaimed descriptor.  Skipped
+ * when a new Tx error summary is already pending, since the ISR must
+ * sort that out first. */
+static void via_rhine_restart_tx(struct net_device *dev) {
+ struct netdev_private *np = dev->priv;
+ long ioaddr = dev->base_addr;
+ int entry = np->dirty_tx % TX_RING_SIZE;
+ u32 intr_status;
+
+ /*
+ * If new errors occurred, we need to sort them out before doing Tx.
+ * In that case the ISR will be back here RSN anyway.
+ */
+ intr_status = get_intr_status(dev);
+
+ if ((intr_status & IntrTxErrSummary) == 0) {
+
+ /* We know better than the chip where it should continue. */
+ writel(np->tx_ring_dma + entry * sizeof(struct tx_desc),
+ ioaddr + TxRingPtr);
+
+ writew(CmdTxDemand | np->chip_cmd, ioaddr + ChipCmd);
+ IOSYNC;
+ }
+ else {
+ /* This should never happen */
+ if (debug > 1)
+ printk(KERN_WARNING "%s: via_rhine_restart_tx() "
+ "Another error occured %8.8x.\n",
+ dev->name, intr_status);
+ }
+
+}
+
+/* Handle the uncommon/error interrupt sources: link changes, tally
+ * counter overflow, Tx aborts/underruns and descriptor write-back
+ * races.  Runs in interrupt context under np->lock. */
+static void via_rhine_error(struct net_device *dev, int intr_status)
+{
+ struct netdev_private *np = dev->priv;
+ long ioaddr = dev->base_addr;
+
+ spin_lock (&np->lock);
+
+ if (intr_status & (IntrLinkChange)) {
+ if (readb(ioaddr + MIIStatus) & 0x02) {
+ /* Link failed, restart autonegotiation. */
+ if (np->drv_flags & HasDavicomPhy)
+ mdio_write(dev, np->phys[0], MII_BMCR, 0x3300);
+ } else
+ via_rhine_check_duplex(dev);
+ if (debug)
+ printk(KERN_ERR "%s: MII status changed: Autonegotiation "
+ "advertising %4.4x partner %4.4x.\n", dev->name,
+ mdio_read(dev, np->phys[0], MII_ADVERTISE),
+ mdio_read(dev, np->phys[0], MII_LPA));
+ }
+ if (intr_status & IntrStatsMax) {
+ np->stats.rx_crc_errors += readw(ioaddr + RxCRCErrs);
+ np->stats.rx_missed_errors += readw(ioaddr + RxMissed);
+ clear_tally_counters(ioaddr);
+ }
+ if (intr_status & IntrTxAborted) {
+ if (debug > 1)
+ printk(KERN_INFO "%s: Abort %8.8x, frame dropped.\n",
+ dev->name, intr_status);
+ }
+ if (intr_status & IntrTxUnderrun) {
+ /* Raise the Tx FIFO threshold (up to 0xE0) to make further
+  * underruns less likely. */
+ if (np->tx_thresh < 0xE0)
+ writeb(np->tx_thresh += 0x20, ioaddr + TxConfig);
+ if (debug > 1)
+ printk(KERN_INFO "%s: Transmitter underrun, Tx "
+ "threshold now %2.2x.\n",
+ dev->name, np->tx_thresh);
+ }
+ if (intr_status & IntrTxDescRace) {
+ if (debug > 2)
+ printk(KERN_INFO "%s: Tx descriptor write-back race.\n",
+ dev->name);
+ }
+ if (intr_status & ( IntrTxAborted | IntrTxUnderrun | IntrTxDescRace ))
+ via_rhine_restart_tx(dev);
+
+ if (intr_status & ~( IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
+ IntrTxError | IntrTxAborted | IntrNormalSummary |
+ IntrTxDescRace )) {
+ if (debug > 1)
+ printk(KERN_ERR "%s: Something Wicked happened! %8.8x.\n",
+ dev->name, intr_status);
+ }
+
+ spin_unlock (&np->lock);
+}
+
+/* Fold the hardware tally counters (CRC errors, missed frames) into the
+ * software statistics and return them. */
+static struct net_device_stats *via_rhine_get_stats(struct net_device *dev)
+{
+ struct netdev_private *np = dev->priv;
+ long ioaddr = dev->base_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&np->lock, flags);
+ np->stats.rx_crc_errors += readw(ioaddr + RxCRCErrs);
+ np->stats.rx_missed_errors += readw(ioaddr + RxMissed);
+ clear_tally_counters(ioaddr);
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ return &np->stats;
+}
+
+/* Program the receive filter: promiscuous, accept-all-multicast, or a
+ * 64-bit multicast hash built from the top 6 bits of ether_crc() over
+ * each multicast address. */
+static void via_rhine_set_rx_mode(struct net_device *dev)
+{
+ struct netdev_private *np = dev->priv;
+ long ioaddr = dev->base_addr;
+ u32 mc_filter[2]; /* Multicast hash filter */
+ u8 rx_mode; /* Note: 0x02=accept runt, 0x01=accept errs */
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ /* Unconditionally log net taps. */
+ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+ rx_mode = 0x1C;
+ } else if ((dev->mc_count > multicast_filter_limit)
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to match, or accept all multicasts. */
+ writel(0xffffffff, ioaddr + MulticastFilter0);
+ writel(0xffffffff, ioaddr + MulticastFilter1);
+ rx_mode = 0x0C;
+ } else {
+ struct dev_mc_list *mclist;
+ int i;
+ memset(mc_filter, 0, sizeof(mc_filter));
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+
+ mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
+ }
+ writel(mc_filter[0], ioaddr + MulticastFilter0);
+ writel(mc_filter[1], ioaddr + MulticastFilter1);
+ rx_mode = 0x0C;
+ }
+ writeb(np->rx_thresh | rx_mode, ioaddr + RxConfig);
+}
+
+/* ethtool and MII ioctl support, compiled out in this tree.
+ * NOTE(review): presumably stripped for the Xen port (the generic MII
+ * helper externs are also #if 0'd in mii.h) -- confirm before
+ * re-enabling. */
+#if 0
+static int netdev_ethtool_ioctl (struct net_device *dev, void *useraddr)
+{
+ struct netdev_private *np = dev->priv;
+ u32 ethcmd;
+
+ if (get_user(ethcmd, (u32 *)useraddr))
+ return -EFAULT;
+
+ switch (ethcmd) {
+ case ETHTOOL_GDRVINFO: {
+ struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
+ strcpy (info.driver, DRV_NAME);
+ strcpy (info.version, DRV_VERSION);
+ strcpy (info.bus_info, np->pdev->slot_name);
+ if (copy_to_user (useraddr, &info, sizeof (info)))
+ return -EFAULT;
+ return 0;
+ }
+
+ /* get settings */
+ case ETHTOOL_GSET: {
+ struct ethtool_cmd ecmd = { ETHTOOL_GSET };
+ if (!(np->drv_flags & CanHaveMII))
+ break;
+ spin_lock_irq(&np->lock);
+ mii_ethtool_gset(&np->mii_if, &ecmd);
+ spin_unlock_irq(&np->lock);
+ if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
+ return -EFAULT;
+ return 0;
+ }
+ /* set settings */
+ case ETHTOOL_SSET: {
+ int r;
+ struct ethtool_cmd ecmd;
+ if (!(np->drv_flags & CanHaveMII))
+ break;
+ if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
+ return -EFAULT;
+ spin_lock_irq(&np->lock);
+ r = mii_ethtool_sset(&np->mii_if, &ecmd);
+ spin_unlock_irq(&np->lock);
+ return r;
+ }
+ /* restart autonegotiation */
+ case ETHTOOL_NWAY_RST: {
+ if (!(np->drv_flags & CanHaveMII))
+ break;
+ return mii_nway_restart(&np->mii_if);
+ }
+ /* get link status */
+ case ETHTOOL_GLINK: {
+ struct ethtool_value edata = {ETHTOOL_GLINK};
+ if (!(np->drv_flags & CanHaveMII))
+ break;
+ edata.data = mii_link_ok(&np->mii_if);
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+ }
+
+ /* get message-level */
+ case ETHTOOL_GMSGLVL: {
+ struct ethtool_value edata = {ETHTOOL_GMSGLVL};
+ edata.data = debug;
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+ }
+ /* set message-level */
+ case ETHTOOL_SMSGLVL: {
+ struct ethtool_value edata;
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+ debug = edata.data;
+ return 0;
+ }
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct netdev_private *np = dev->priv;
+ struct mii_ioctl_data *data = (struct mii_ioctl_data *) & rq->ifr_data;
+ int rc;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (cmd == SIOCETHTOOL)
+ rc = netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
+
+ else {
+ spin_lock_irq(&np->lock);
+ rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
+ spin_unlock_irq(&np->lock);
+ }
+
+ return rc;
+}
+#endif
+
+/* Stop the device: kill the link timer, mask interrupts, stop the
+ * Tx/Rx engines, then free the IRQ, both buffer sets and the rings.
+ * Always returns 0. */
+static int via_rhine_close(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = dev->priv;
+
+ del_timer_sync(&np->timer);
+
+ spin_lock_irq(&np->lock);
+
+ netif_stop_queue(dev);
+
+ if (debug > 1)
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
+ dev->name, readw(ioaddr + ChipCmd));
+
+ /* Switch to loopback mode to avoid hardware races. */
+ writeb(np->tx_thresh | 0x02, ioaddr + TxConfig);
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ writew(0x0000, ioaddr + IntrEnable);
+
+ /* Stop the chip's Tx and Rx processes. */
+ writew(CmdStop, ioaddr + ChipCmd);
+
+ spin_unlock_irq(&np->lock);
+
+ free_irq(np->pdev->irq, dev);
+ free_rbufs(dev);
+ free_tbufs(dev);
+ free_ring(dev);
+
+ return 0;
+}
+
+
+/* PCI removal hook: unregister the net device, release the I/O/memory
+ * regions (unmapping MMIO only when built with USE_MEM), free the
+ * device and detach it from the pci_dev. */
+static void __devexit via_rhine_remove_one (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ unregister_netdev(dev);
+
+ pci_release_regions(pdev);
+
+#ifdef USE_MEM
+ iounmap((char *)(dev->base_addr));
+#endif
+
+ kfree(dev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+
+/* PCI glue: probe/remove entry points for the Rhine IDs in
+ * via_rhine_pci_tbl. */
+static struct pci_driver via_rhine_driver = {
+ .name = "via-rhine",
+ .id_table = via_rhine_pci_tbl,
+ .probe = via_rhine_init_one,
+ .remove = __devexit_p(via_rhine_remove_one),
+};
+
+
+/* Module/driver entry point: print the version banner (module builds
+ * only) and register the PCI driver. */
+static int __init via_rhine_init (void)
+{
+/* when a module, this is printed whether or not devices are found in probe */
+#ifdef MODULE
+ printk(version);
+#endif
+ return pci_module_init (&via_rhine_driver);
+}
+
+
+/* Module exit: unregister the PCI driver (removal hook runs per device). */
+static void __exit via_rhine_cleanup (void)
+{
+ pci_unregister_driver (&via_rhine_driver);
+}
+
+
+module_init(via_rhine_init);
+module_exit(via_rhine_cleanup);
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c via-rhine.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
--- /dev/null
+/*
+ * crc32.h for early Linux 2.4.19pre kernel inclusion
+ * This defines ether_crc_le() and ether_crc() as inline functions
+ * This is slated to change to using the library crc32 functions
+ * as kernel 2.5.2 included at some future date.
+ */
+#ifndef _LINUX_CRC32_H
+#define _LINUX_CRC32_H
+
+#include <linux/types.h>
+
+/* The little-endian AUTODIN II ethernet CRC calculation.
+ N.B. Do not use for bulk data, use a table-based routine instead.
+ This is common code and should be moved to net/core/crc.c */
+static unsigned const ethernet_polynomial_le = 0xedb88320U;
+/* Bit-reflected CRC-32 (polynomial 0xEDB88320), initial value
+ * 0xFFFFFFFF, no final XOR; processes each octet LSB first. */
+static inline unsigned ether_crc_le(int length, unsigned char *data)
+{
+ unsigned int crc = 0xffffffff; /* Initial value. */
+ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 8; --bit >= 0; current_octet >>= 1) {
+ if ((crc ^ current_octet) & 1) {
+ crc >>= 1;
+ crc ^= ethernet_polynomial_le;
+ } else
+ crc >>= 1;
+ }
+ }
+ return crc;
+}
+
+static unsigned const ethernet_polynomial = 0x04c11db7U;
+/* MSB-first ethernet CRC-32 (polynomial 0x04C11DB7), initial value -1,
+ * no final XOR; each octet is fed in LSB first.  Drivers use the top
+ * six bits (>> 26) of the result to index the multicast hash filter. */
+static inline u32 ether_crc(int length, unsigned char *data)
+{
+ int crc = -1;
+ while (--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
+ crc = (crc << 1) ^
+ ((crc < 0) ^ (current_octet & 1) ?
+ ethernet_polynomial : 0);
+ }
+ }
+ return crc;
+}
+
+#endif /* _LINUX_CRC32_H */
#define ADVERTISE_LPACK 0x4000 /* Ack link partners response */
#define ADVERTISE_NPAGE 0x8000 /* Next page bit */
+#define ADVERTISE_FULL (ADVERTISE_100FULL | ADVERTISE_10FULL | \
+ ADVERTISE_CSMA)
#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \
ADVERTISE_100HALF | ADVERTISE_100FULL)
#define NWAYTEST_LOOPBACK 0x0100 /* Enable loopback for N-way */
#define NWAYTEST_RESV2 0xfe00 /* Unused... */
+
+/* Per-device MII management state used by the generic MII helpers. */
+struct mii_if_info {
+ int phy_id; /* address of the PHY in use */
+ int advertising; /* shadow of our advertised abilities (MII_ADVERTISE) */
+ int phy_id_mask; /* valid bits in a PHY address -- TODO confirm, helpers not in view */
+ int reg_num_mask; /* valid bits in a MII register number -- TODO confirm */
+
+ unsigned int full_duplex : 1; /* is full duplex? */
+ unsigned int force_media : 1; /* is autoneg. disabled? */
+
+ struct net_device *dev;
+ int (*mdio_read) (struct net_device *dev, int phy_id, int location);
+ void (*mdio_write) (struct net_device *dev, int phy_id, int location, int val);
+};
+
+struct ethtool_cmd;
+struct mii_ioctl_data;
+
+#if 0
+extern int mii_link_ok (struct mii_if_info *mii);
+extern int mii_nway_restart (struct mii_if_info *mii);
+extern int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
+extern int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
+extern void mii_check_link (struct mii_if_info *mii);
+extern unsigned int mii_check_media (struct mii_if_info *mii,
+ unsigned int ok_to_print,
+ unsigned int init_media);
+extern int generic_mii_ioctl(struct mii_if_info *mii_if,
+ struct mii_ioctl_data *mii_data, int cmd,
+ unsigned int *duplex_changed);
+#endif
+
+
/* This structure is used in all SIOCxMIIxxx ioctl calls */
struct mii_ioctl_data {
u16 phy_id;
extern int netdev_set_master(struct net_device *dev, struct net_device *master);
extern struct sk_buff * skb_checksum_help(struct sk_buff *skb);
+extern void alert_slow_netdevice(struct net_device *dev, char *nictype);
+
#endif /* __KERNEL__ */
#endif /* _LINUX_DEV_H */
int offset, void *to, int len);
extern void skb_init(void);
+extern int skb_linearize(struct sk_buff *skn, int gfp_mask);
+
#endif /* _LINUX_SKBUFF_H */
vif->total_packets_sent++;
vif->total_bytes_sent += tx->size;
+ /* NIC cannot do scatter-gather: flatten the skb before handing it
+  * to hard_start_xmit.  NOTE(review): skb_linearize() can fail with
+  * -ENOMEM and the return value is ignored here -- the skb would then
+  * still be fragmented; confirm the driver copes or check the result. */
+ if ( !(dev->features & NETIF_F_SG) )
+ skb_linearize(skb, GFP_KERNEL);
+
/* Transmit should always work, or the queue would be stopped. */
if ( dev->hard_start_xmit(skb, dev) != 0 )
{
return n;
}
+/* Collapse a possibly-fragmented skb into one linear data area.  A new
+ * buffer covering the headroom plus all data (including paged frags) is
+ * allocated with gfp_mask, the contents copied over, the old data
+ * released, and every header pointer rebased onto the new buffer.
+ * Returns 0 on success (or if the skb has no frags) and -ENOMEM if the
+ * allocation fails, in which case the skb is left untouched. */
+int skb_linearize(struct sk_buff *skb, int gfp_mask)
+{
+ unsigned int size;
+ u8 *data;
+ long offset;
+ int headerlen = skb->data - skb->head;
+ /* Extra room needed so the linear area can absorb the paged data. */
+ int expand = (skb->tail+skb->data_len) - skb->end;
+
+ if (skb_shinfo(skb)->nr_frags == 0)
+ return 0;
+
+ if (expand <= 0)
+ expand = 0;
+
+ size = (skb->end - skb->head + expand);
+ size = SKB_DATA_ALIGN(size);
+ data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
+ if (data == NULL)
+ return -ENOMEM;
+
+ /* Copy entire thing */
+ if (skb_copy_bits(skb, -headerlen, data, headerlen+skb->len))
+ BUG();
+
+ /* Offset between the two in bytes */
+ offset = data - skb->head;
+
+ /* Free old data. */
+ skb_release_data(skb);
+
+ skb->head = data;
+ skb->end = data + size;
+
+ /* Set up new pointers */
+ skb->h.raw += offset;
+ skb->nh.raw += offset;
+ skb->mac.raw += offset;
+ skb->tail += offset;
+ skb->data += offset;
+
+ skb->skb_type = SKB_NORMAL;
+
+ /* Set up shinfo */
+ skb_shinfo(skb)->nr_frags = 0;
+
+ /* The formerly-paged bytes are now part of the linear area. */
+ skb->tail += skb->data_len;
+ skb->data_len = 0;
+ return 0;
+}
+
/* Copy some data bits from skb to kernel buffer. */
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)